Skip to content

[HANDS-ON BUG] Unit 6 Package to hub #646

@JacquelineJR

Description

@JacquelineJR

Package_to_hub is not working

When I try to push the trained agent to the Hub using the package_to_hub cell, an error occurs.
https://colab.research.google.com/drive/1i5qcc4OVOBSidYwmMbJx0FqzztX2EfMJ#scrollTo=J-cC-Feg9iMm

Material

  • Did you use Google Colab? Yes

If not:

  • Your Operating system (OS)
  • Version of your OS

The error message is as follows:
ℹ This function will save, evaluate, generate a video of your agent,
create a model card and push everything to the hub. It might take up to 1min.
This is a work in progress: if you encounter a bug, please open an issue.

error Traceback (most recent call last)
/tmp/ipython-input-605551018.py in <cell line: 0>()
1 from huggingface_sb3 import package_to_hub
2
----> 3 package_to_hub(
4 model=model,
5 model_name=f"a2c-{env_id}",

12 frames
/usr/local/lib/python3.12/dist-packages/huggingface_sb3/push_to_hub.py in package_to_hub(model, model_name, model_architecture, env_id, eval_env, repo_id, commit_message, is_deterministic, n_eval_episodes, token, video_length, logs)
375
376 # Step 3: Evaluate the agent
--> 377 mean_reward, std_reward = _evaluate_agent(
378 model, eval_env, n_eval_episodes, is_deterministic, tmpdirname
379 )

/usr/local/lib/python3.12/dist-packages/huggingface_sb3/push_to_hub.py in _evaluate_agent(model, eval_env, n_eval_episodes, is_deterministic, local_path)
72 """
73 # Step 1: Evaluate the agent
---> 74 mean_reward, std_reward = evaluate_policy(
75 model, eval_env, n_eval_episodes, is_deterministic
76 )

/usr/local/lib/python3.12/dist-packages/stable_baselines3/common/evaluation.py in evaluate_policy(model, env, n_eval_episodes, deterministic, render, callback, reward_threshold, return_episode_rewards, warn)
85 current_rewards = np.zeros(n_envs)
86 current_lengths = np.zeros(n_envs, dtype="int")
---> 87 observations = env.reset()
88 states = None
89 episode_starts = np.ones((env.num_envs,), dtype=bool)

/usr/local/lib/python3.12/dist-packages/stable_baselines3/common/vec_env/vec_normalize.py in reset(self)
295 :return: first observation of the episode
296 """
--> 297 obs = self.venv.reset()
298 assert isinstance(obs, (np.ndarray, dict))
299 self.old_obs = obs

/usr/local/lib/python3.12/dist-packages/stable_baselines3/common/vec_env/dummy_vec_env.py in reset(self)
76 for env_idx in range(self.num_envs):
77 maybe_options = {"options": self._options[env_idx]} if self._options[env_idx] else {}
---> 78 obs, self.reset_infos[env_idx] = self.envs[env_idx].reset(seed=self._seeds[env_idx], **maybe_options)
79 self._save_obs(env_idx, obs)
80 # Seeds and options are only used once

/usr/local/lib/python3.12/dist-packages/gymnasium/wrappers/common.py in reset(self, seed, options)
144 """
145 self._elapsed_steps = 0
--> 146 return super().reset(seed=seed, options=options)
147
148 @property

/usr/local/lib/python3.12/dist-packages/gymnasium/core.py in reset(self, seed, options)
331 ) -> tuple[WrapperObsType, dict[str, Any]]:
332 """Uses the :meth:`reset` of the :attr:`env` that can be overwritten to change the returned data."""
--> 333 return self.env.reset(seed=seed, options=options)
334
335 def render(self) -> RenderFrame | list[RenderFrame] | None:

/usr/local/lib/python3.12/dist-packages/gymnasium/wrappers/common.py in reset(self, seed, options)
398 """Resets the environment with kwargs."""
399 self._has_reset = True
--> 400 return super().reset(seed=seed, options=options)
401
402 def render(self) -> RenderFrame | list[RenderFrame] | None:

/usr/local/lib/python3.12/dist-packages/gymnasium/core.py in reset(self, seed, options)
331 ) -> tuple[WrapperObsType, dict[str, Any]]:
332 """Uses the :meth:`reset` of the :attr:`env` that can be overwritten to change the returned data."""
--> 333 return self.env.reset(seed=seed, options=options)
334
335 def render(self) -> RenderFrame | list[RenderFrame] | None:

/usr/local/lib/python3.12/dist-packages/gymnasium/wrappers/common.py in reset(self, seed, options)
293 return env_reset_passive_checker(self.env, seed=seed, options=options)
294 else:
--> 295 return self.env.reset(seed=seed, options=options)
296
297 def render(self) -> RenderFrame | list[RenderFrame] | None:

/usr/local/lib/python3.12/dist-packages/panda_gym/envs/core.py in reset(self, seed, options)
279 super().reset(seed=seed, options=options)
280 self.task.np_random, seed = seeding.np_random(seed)
--> 281 with self.sim.no_rendering():
282 self.robot.reset()
283 self.task.reset()

/usr/lib/python3.12/contextlib.py in __enter__(self)
135 del self.args, self.kwds, self.func
136 try:
--> 137 return next(self.gen)
138 except StopIteration:
139 raise RuntimeError("generator didn't yield") from None

/usr/local/lib/python3.12/dist-packages/panda_gym/pybullet.py in no_rendering(self)
387 def no_rendering(self) -> Iterator[None]:
388 """Disable rendering within this context."""
--> 389 self.physics_client.configureDebugVisualizer(self.physics_client.COV_ENABLE_RENDERING, 0)
390 yield
391 self.physics_client.configureDebugVisualizer(self.physics_client.COV_ENABLE_RENDERING, 1)

error: Not connected to physics server.

Metadata

Metadata

Assignees

No one assigned

    Labels

    No labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions