The learn function of the PPO algorithm throws an error


I am trying to train a car with the PPO algorithm on OpenAI Gym's CarRacing-v2 environment:

import os
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.evaluation import evaluate_policy
environment_name = 'CarRacing-v2'
env = gym.make(environment_name, render_mode='human')
env.reset()
env.close()
environment_name = 'CarRacing-v2'
env = gym.make(environment_name, render_mode='human')
episodes = 5

for episode in range(1, episodes+1):
    
    observation, info = env.reset()
    terminated = False
    truncated = False
    score = 0 
    
    while not (terminated or truncated):
        #env.render()
        action = env.action_space.sample()
        observation, reward, terminated, truncated, info = env.step(action)
        score += reward
          
    print(f'Episode: {episode} Score: {score}')
    
env.close()
env = gym.make(environment_name)
env = DummyVecEnv([lambda: env])
log_path = os.path.join('Training', 'Logs')
model = PPO('CnnPolicy', env, verbose=1, tensorboard_log=log_path)
model.learn(total_timesteps=200000)

This is my code, but the last line

model.learn(total_timesteps=200000)

throws the following error:

ValueError                                Traceback (most recent call last)
Cell In[19], line 1
----> 1 model.learn(total_timesteps=200000, reset_num_timesteps=False)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\stable_baselines3\ppo\ppo.py:299, in PPO.learn(self, total_timesteps, callback, log_interval, eval_env, eval_freq, n_eval_episodes, tb_log_name, eval_log_path, reset_num_timesteps)
    286 def learn(
    287     self,
    288     total_timesteps: int,
   (...)
    296     reset_num_timesteps: bool = True,
    297 ) -> "PPO":
--> 299     return super(PPO, self).learn(
    300         total_timesteps=total_timesteps,
    301         callback=callback,
    302         log_interval=log_interval,
    303         eval_env=eval_env,
    304         eval_freq=eval_freq,
    305         n_eval_episodes=n_eval_episodes,
    306         tb_log_name=tb_log_name,
    307         eval_log_path=eval_log_path,
    308         reset_num_timesteps=reset_num_timesteps,
    309     )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\stable_baselines3\common\on_policy_algorithm.py:242, in OnPolicyAlgorithm.learn(self, total_timesteps, callback, log_interval, eval_env, eval_freq, n_eval_episodes, tb_log_name, eval_log_path, reset_num_timesteps)
    228 def learn(
    229     self,
    230     total_timesteps: int,
   (...)
    238     reset_num_timesteps: bool = True,
    239 ) -> "OnPolicyAlgorithm":
    240     iteration = 0
--> 242     total_timesteps, callback = self._setup_learn(
    243         total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name
    244     )
    246     callback.on_training_start(locals(), globals())
    248     while self.num_timesteps < total_timesteps:

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\stable_baselines3\common\base_class.py:429, in BaseAlgorithm._setup_learn(self, total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, log_path, reset_num_timesteps, tb_log_name)
    427 # Avoid resetting the environment when calling ``.learn()`` consecutive times
    428 if reset_num_timesteps or self._last_obs is None:
--> 429     self._last_obs = self.env.reset()  # pytype: disable=annotation-type-mismatch
    430     self._last_episode_starts = np.ones((self.env.num_envs,), dtype=bool)
    431     # Retrieve unnormalized observation for saving into the buffer

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\stable_baselines3\common\vec_env\vec_transpose.py:110, in VecTransposeImage.reset(self)
    106 def reset(self) -> Union[np.ndarray, Dict]:
    107     """
    108     Reset all environments
    109     """
--> 110     return self.transpose_observations(self.venv.reset())

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\stable_baselines3\common\vec_env\dummy_vec_env.py:62, in DummyVecEnv.reset(self)
     60 for env_idx in range(self.num_envs):
     61     obs = self.envs[env_idx].reset()
---> 62     self._save_obs(env_idx, obs)
     63 return self._obs_from_buf()

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\stable_baselines3\common\vec_env\dummy_vec_env.py:92, in DummyVecEnv._save_obs(self, env_idx, obs)
     90 for key in self.keys:
     91     if key is None:
---> 92         self.buf_obs[key][env_idx] = obs
     93     else:
     94         self.buf_obs[key][env_idx] = obs[key]

ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (2,) + inhomogeneous part.

I am using stable-baselines3 v1.7.0 and gym 0.26.0. Python has been upgraded to the latest version.

I have tried to find a solution to this all over the internet, but without success.

python machine-learning reinforcement-learning openai-gym
1 Answer

stable-baselines3 requires gym==0.21.0, which you can install with

pip install gym==0.21.0
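After downgrading, a quick sanity check (not part of the fix itself) confirms which version is actually in use:

python -c "import gym; print(gym.__version__)"   # should print 0.21.0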

In that case, you have to use 'CarRacing-v0'.
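For context, the ValueError in the question comes from the Gym API change: in gym 0.26, env.reset() returns an (observation, info) tuple and env.step() returns a 5-tuple, while stable-baselines3 1.7.0 still expects the gym 0.21 API. DummyVecEnv therefore tries to store the 2-element (observation, info) tuple in its observation buffer, which is exactly the "(2,) + inhomogeneous part" shape in the traceback. A minimal sketch of the two APIs:

# gym 0.21 API (what stable-baselines3 1.7.0 expects)
obs = env.reset()                                            # observation only
obs, reward, done, info = env.step(action)                   # 4-tuple

# gym 0.26 API (what the question's code uses)
obs, info = env.reset()                                      # (observation, info) tuple
obs, reward, terminated, truncated, info = env.step(action)  # 5-tuple

With gym 0.21, the full script uses the old reset()/step() signatures: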

import os
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3.common.evaluation import evaluate_policy
environment_name = 'CarRacing-v0'
env = gym.make(environment_name)
env.reset()
env.close()
episodes = 5

for episode in range(1, episodes+1):
    
    observation = env.reset()
    done = False
    score = 0 
    
    while not done:
        #env.render()
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
        score += reward
          
    print(f'Episode: {episode} Score: {score}')
    
env.close()
env = gym.make(environment_name)
env = DummyVecEnv([lambda: env])
log_path = os.path.join('Training', 'Logs')
model = PPO('CnnPolicy', env, verbose=1, tensorboard_log=log_path)
model.learn(total_timesteps=200000)
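Once learn() finishes, the evaluate_policy helper that is already imported above can be used to check the trained policy, and the model can be saved for later use; the save path below is only an illustrative choice:

mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=5, render=False)
print(f'Mean reward: {mean_reward} +/- {std_reward}')
model.save(os.path.join('Training', 'Saved Models', 'PPO_CarRacing'))  # hypothetical save path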