Simple behavioral cloning example with RL-Coach


I would like to run a simple behavioral cloning (BC) experiment with RL-Coach and a custom Gym environment. Based on the idea from this, I am trying to predict target from f_1, f_2 and f_3. The Gym environment I am using is:

import random
import gym
from gym import spaces
import pandas as pd
import numpy as np


class TestEnvOne(gym.Env):

    def __init__(self, max_time):
        super(TestEnvOne, self).__init__()
        self.max_time = max_time
        f_1 = np.sin(np.arange(self.max_time))
        f_2 = np.cos(np.arange(self.max_time))
        f_3 = np.tan(np.arange(self.max_time))
        target = np.roll(f_1, 1) + np.roll(f_2, 2) + np.roll(f_3, 3)
        self.df = pd.DataFrame({'target': target, 'f_1': f_1, 'f_2': f_2, 'f_3': f_3})
        self.max_target = self.df.max()['target']
        self.min_target = self.df.min()['target']
        self.max_f_1 = self.df.max()['f_1']
        self.max_f_2 = self.df.max()['f_2']
        self.max_f_3 = self.df.max()['f_3']
        self.min_f_1 = self.df.min()['f_1']
        self.min_f_2 = self.df.min()['f_2']
        self.min_f_3 = self.df.min()['f_3']
        self.start_step = 0
        self.current_step = 0

        # Actions
        self.action_space = spaces.Box(
            low=np.array([0, ]), high=np.array([1, ]), dtype=np.float32)

        # Observation
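        # declared shapes: 'measurements' -> (3,), 'desired_goal' -> (1,)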
        self.observation_space = spaces.Dict({
            'measurements': spaces.Box(low=np.array([0, 0, 0]),
                                       high=np.array([1, 1, 1]),
                                       dtype=np.float32),
            'desired_goal': spaces.Box(low=np.array([0]),
                                       high=np.array([1]),
                                       dtype=np.float32)
        })

        self.reward_range = (-1, 1)

    def _next_observation(self):
        # Scale to between 0-1
        frame = {'desired_goal': self.df.loc[self.current_step, 'target'] / self.max_target,
                 'measurements': [
                     self.df.loc[self.current_step, 'f_1'] / self.max_f_1,
                     self.df.loc[self.current_step, 'f_2'] / self.max_f_2,
                     self.df.loc[self.current_step, 'f_3'] / self.max_f_3
                 ]}

        return frame

    def step(self, action):
        self.current_step += 1

        if self.current_step >= len(self.df.loc[:, 'target'].values):
            self.current_step = 0

        obs = self._next_observation()
        reward = obs['desired_goal'] - action[0]
        done = (self.current_step == self.start_step)

        return {'measurements': obs['measurements'], 'desired_goal': obs['desired_goal']}, reward, done, {}

    def reset(self):
        # Set the current step to a random point within the data frame
        self.start_step = random.randint(
            0, len(self.df.loc[:, 'target'].values) - 1)
        self.current_step = self.start_step

        return self._next_observation()

    def render(self, mode='human', close=False):
        # Render the environment to the screen

        print(f'Step: {self.current_step}')
        print(f'Target: {self.df.loc[self.current_step, "target"]}')

The preset I am using is based on Doom_Basic_BC and looks like this:

from rl_coach.agents.bc_agent import BCAgentParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.base_parameters import VisualizationParameters, PresetValidationParameters
from rl_coach.core_types import TrainingSteps, EnvironmentEpisodes, EnvironmentSteps
from rl_coach.environments.gym_environment import GymVectorEnvironment
from rl_coach.graph_managers.basic_rl_graph_manager import BasicRLGraphManager
from rl_coach.graph_managers.graph_manager import ScheduleParameters
from rl_coach.memories.memory import MemoryGranularity


####################
# Graph Scheduling #
####################

schedule_params = ScheduleParameters()
schedule_params.improve_steps = TrainingSteps(2000)
schedule_params.steps_between_evaluation_periods = TrainingSteps(20)
schedule_params.evaluation_steps = EnvironmentEpisodes(5)
schedule_params.heatup_steps = EnvironmentSteps(10)

#########
# Agent #
#########
agent_params = BCAgentParameters()
agent_params.network_wrappers['main'].learning_rate = 0.00025
agent_params.memory.max_size = (MemoryGranularity.Transitions, 1000000)
agent_params.algorithm.discount = 0.99
agent_params.algorithm.num_consecutive_playing_steps = EnvironmentSteps(0)
#agent_params.network_wrappers['main'].batch_size = 1
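# one input embedder per key in the environment's observation dict ('measurements' and 'desired_goal')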
agent_params.network_wrappers['main'].input_embedders_parameters = {
    'measurements': InputEmbedderParameters(),
    'desired_goal': InputEmbedderParameters()
}


###############
# Environment #
###############
#envPath = 'env.TestEnvZero:TestEnvZero'
envPath = 'env.TestEnvOne:TestEnvOne'
env_params = GymVectorEnvironment(level=envPath)
env_params.additional_simulator_parameters = {'max_time': 2000}

########
# Test #
########
preset_validation_params = PresetValidationParameters()
preset_validation_params.test_using_a_trace_test = False

graph_manager = BasicRLGraphManager(agent_params=agent_params, env_params=env_params,
                                    schedule_params=schedule_params, vis_params=VisualizationParameters(),
                                    preset_validation_params=preset_validation_params)

When I run the following command:

coach -p presets/PruebaPresetBC.py

the output is:

Please enter an experiment name: Test1

Creating graph - name: BasicRLGraphManager
Creating agent - name: agent
simple_rl_graph: Starting heatup
2020-04-30-16:21:37.128831 Heatup - Name: main_level/agent Worker: 0 Episode: 1 Total reward: -998.28 Exploration: [0.1] Steps: 2000 Training iteration: 0 
Starting to improve simple_rl_graph task index 0
Traceback (most recent call last):
  File "/home/user/coach_env/bin/coach", line 8, in <module>
    sys.exit(main())
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 777, in main
    launcher.launch()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 226, in launch
    self.run_graph_manager(graph_manager, args)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 612, in run_graph_manager
    self.start_single_threaded(task_parameters, graph_manager, args)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 674, in start_single_threaded
    start_graph(graph_manager=graph_manager, task_parameters=task_parameters)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/coach.py", line 88, in start_graph
    graph_manager.improve()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 547, in improve
    self.train_and_act(self.steps_between_evaluation_periods)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 482, in train_and_act
    self.train()
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 408, in train
    [manager.train() for manager in self.level_managers]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/graph_managers/graph_manager.py", line 408, in <listcomp>
    [manager.train() for manager in self.level_managers]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/level_manager.py", line 187, in train
    [agent.train() for agent in self.agents.values()]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/level_manager.py", line 187, in <listcomp>
    [agent.train() for agent in self.agents.values()]
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/agents/agent.py", line 741, in train
    total_loss, losses, unclipped_grads = self.learn_from_batch(batch)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/agents/bc_agent.py", line 77, in learn_from_batch
    targets)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/architectures/network_wrapper.py", line 171, in train_and_sync_networks
    importance_weights=importance_weights, no_accumulation=True)
  File "/home/user/coach_env/lib/python3.6/site-packages/rl_coach/architectures/tensorflow_components/architecture.py", line 365, in accumulate_gradients
    result = self.sess.run(fetches, feed_dict=feed_dict)
  File "/home/user/coach_env/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 950, in run
    run_metadata_ptr)
  File "/home/user/coach_env/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1149, in _run
    str(subfeed_t.get_shape())))
ValueError: Cannot feed value of shape (32,) for Tensor 'main_level/agent/main/online/network_0/desired_goal/desired_goal:0', which has shape '(?, 1)'

--------------------------------

Results stored at: ./experiments/Test1/30_4_2020-16_21_0
Total runtime: 0:00:06.481459

--------------------------------

I found out that the 32 in the shape comes from

agent_params.network_wrappers['main'].batch_size

in the preset, but I do not know how to proceed or how to fix the problem. Sometimes, running the same code, I get the following exception instead:

ValueError: Cannot feed value of shape (32, 3) for Tensor 'main_level/agent/main/online/network_0/measurements/measurements:0', which has shape '(?, 0)'
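
My current guess, which I have not been able to confirm, is that the shapes come from how the batch of 32 observations is stacked before being fed to the network: a plain Python float per transition stacks to (32,), while an array of shape (1,) per transition would stack to (32, 1). The following standalone NumPy check (my own illustration, not RL-Coach code) shows the difference:

import numpy as np

# 32 transitions where 'desired_goal' is a plain Python float
floats = [0.5] * 32
print(np.array(floats).shape)                  # (32,)   -> rejected by the (?, 1) placeholder

# 32 transitions where 'desired_goal' is an array of shape (1,)
arrays = [np.array([0.5], dtype=np.float32)] * 32
print(np.array(arrays).shape)                  # (32, 1) -> matches (?, 1)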

Any help is greatly appreciated.

Update 2020-05-04:

Following @MarcusRenshaw's suggestion, I added a print statement to the step and reset functions of the Gym environment. Right before the error, reset is called and the observation it returns is:

reset: {'desired_goal': 0.00559788442127721, 'measurements': [0.6832680466354063, 0.7301735609948197, 0.00400035745607452]}

The observation is the same for both the shape (32, 3) and the shape (32,) errors. During heatup, the last observation returned by the step function is:

step: {'measurements': [-0.6434517999514073, 0.7654916425445919, -0.00359343140212023], 'desired_goal': -0.010710469493505773}
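
In case it is useful, this is the variant of _next_observation I am planning to try next (untested; it simply returns NumPy arrays whose shapes match the declared observation_space, i.e. shape (1,) for desired_goal and shape (3,) for measurements, instead of a plain float and a list):

    def _next_observation(self):
        # Return arrays whose shapes match the declared observation_space
        row = self.df.loc[self.current_step]
        return {
            'desired_goal': np.array([row['target'] / self.max_target], dtype=np.float32),
            'measurements': np.array([row['f_1'] / self.max_f_1,
                                      row['f_2'] / self.max_f_2,
                                      row['f_3'] / self.max_f_3], dtype=np.float32),
        }

With this version the reward computed in step would become a length-1 array, so it would probably also need to be unpacked, e.g. reward = float(obs['desired_goal'][0] - action[0]).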

I hope this extra information helps.
