【发布时间】:2021-03-19 19:30:10
【问题描述】:
tf 版本:2.3.0
import numpy as np
import tensorflow as tf
from tf_agents.agents.reinforce import reinforce_agent
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym, tf_py_environment
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.trajectories import trajectory
from tf_agents.utils import common
tf.compat.v1.enable_v2_behavior()
env_name='CartPole-v0'
num_iterations=1
collect_episodes_per_iteration=2
replay_buffer_capacity=2000
fc_layer_params=(100, )
learning_rate=1e-3
log_interval=5
num_eval_episodes=10
eval_interval=10
env=suite_gym.load(env_name)
env.reset()
time_step=env.reset()
train_py_env=suite_gym.load(env_name)
train_env=tf_py_environment.TFPyEnvironment(train_py_env)
actor_net=actor_distribution_network.ActorDistributionNetwork(train_env.observation_spec(), train_env.action_spec(), fc_layer_params=fc_layer_params)
optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter=tf.compat.v2.Variable(0)
tf_agent=reinforce_agent.ReinforceAgent(train_env.time_step_spec(),
train_env.action_spec(),
actor_network=actor_net,
optimizer=optimizer,
normalize_returns=True,
train_step_counter=train_step_counter)
tf_agent.initialize()
eval_policy=tf_agent.policy
collect_policy=tf_agent.collect_policy
replay_buffer=tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_capacity
)
tf_agent.train=common.function(tf_agent.train)
def collect_episode(environment, policy, num_episodes):
episode_counter=0
environment.reset()
while episode_counter<num_episodes:
time_step=environment.current_time_step()
action_step=policy.action(time_step)
next_time_step=environment.step(action_step.action)
traj=trajectory.from_transition(time_step, action_step, next_time_step)
replay_buffer.add_batch(traj)
if traj.is_boundary():
episode_counter+=1
collect_episode(train_env, tf_agent.collect_policy, 1)
experience=replay_buffer.gather_all()
for _ in range(num_iterations):
collect_episode(train_env, tf_agent.collect_policy, collect_episodes_per_iteration)
from copy import copy
before=copy(tf_agent.trainable_variables)
experience=replay_buffer.gather_all()
train_loss=tf_agent.train(experience)
replay_buffer.clear()
after=copy(tf_agent.trainable_variables)
print('before==after?', before==after)
https://www.tensorflow.org/agents/tutorials/6_reinforce_tutorial
我正在关注 TFAgents 的教程,但我发现
before=copy(tf_agent.trainable_variables)
tf_agent.train(experience)
after=copy(tf_agent.trainable_variables)
那么“之前”应该与“之后”不同。 但是 (before==after) 总是代表“真”。
我对此感到非常困惑。我认为梯度可能为零。
但是,它不合理地导致模型的损失在训练步骤中继续减少。
在强化代理模块上,渐变胶带步骤写得很好..
我找不到问题所在... 无论训练步骤如何,甚至 tf_agent.policy.trainable_variables 都是相同的..
【问题讨论】:
-
您能否编辑您的问题并仅包含绝对必要的代码部分?否则,任何人都几乎没有机会阅读您的整个问题。阅读这篇文章:stackoverflow.com/help/minimal-reproducible-example
-
谢谢,我尽可能简短地更改了我的代码。
标签: tensorflow variables tensorflow2.0 reinforcement-learning agent