Unable to use saved model as starting point for training Baselines' MlpPolicy?
I am currently training a model using code from OpenAI's baselines, with the following in my train.py:
from baselines.common import tf_util as U
import tensorflow as tf
import gym, logging
from visak_dartdeepmimic import VisakDartDeepMimicArgParse

def train(env, initial_params_path,
          save_interval, out_prefix, num_timesteps, num_cpus):
    from baselines.ppo1 import mlp_policy, pposgd_simple
    sess = U.make_session(num_cpu=num_cpus).__enter__()
    U.initialize()

    def policy_fn(name, ob_space, ac_space):
        print("Policy with name: ", name)
        policy = mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                      hid_size=64, num_hid_layers=2)
        saver = tf.train.Saver()
        if initial_params_path is not None:
            print("Tried to restore from ", initial_params_path)
            saver.restore(tf.get_default_session(), initial_params_path)
        return policy

    def callback_fn(local_vars, global_vars):
        iters = local_vars["iters_so_far"]
        saver = tf.train.Saver()
        if iters % save_interval == 0:
            saver.save(sess, out_prefix + str(iters))

    pposgd_simple.learn(env, policy_fn,
                        max_timesteps=num_timesteps,
                        callback=callback_fn,
                        timesteps_per_actorbatch=2048,
                        clip_param=0.2, entcoeff=0.0,
                        optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
                        gamma=1.0, lam=0.95, schedule='linear',
                        )
    env.close()
which is based on the code that OpenAI itself provides in the baselines repository.

This works fine, except that I get some strange-looking learning curves, which I suspect are caused by some of the hyperparameters passed to the learn function making performance decay / become high-variance as training goes on (though I'm not certain).
Anyway, to test this hypothesis I would like to retrain the model, but not from scratch: I want to pick up from a high point, say iteration 1600, for which I have a saved model (saved with saver.save in callback_fn).

So now I call the train function, but this time I give it an initial_params_path pointing to the save prefix for iteration 1600. As I understand it, the call to saver.restore in policy_fn should then "reset" the model to where it was at iteration 1600 (and I have confirmed, via the print statement, that the loading routine does run).
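For concreteness, the retraining run is kicked off with something like the following (the checkpoint path and numeric values here are placeholders rather than my exact settings; the point is just how initial_params_path is supplied, and the env construction mirrors the eval.py script further down):

    # Illustrative only: concrete paths, intervals, and timestep counts are placeholders.
    parser = VisakDartDeepMimicArgParse()
    args = parser.parse_args()
    env = parser.get_env()

    train(env,
          initial_params_path="checkpoints/model1600",  # prefix written by callback_fn at iteration 1600
          save_interval=100,
          out_prefix="checkpoints/model",
          num_timesteps=int(3e7),
          num_cpus=1)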
However, in practice I find that it is almost as if nothing is loaded at all. For example, if I get statistics like
----------------------------------
| EpLenMean | 74.2 |
| EpRewMean | 38.7 |
| EpThisIter | 209 |
| EpisodesSoFar | 662438 |
| TimeElapsed | 2.15e+04 |
| TimestepsSoFar | 26230266 |
| ev_tdlam_before | 0.95 |
| loss_ent | 2.7640965 |
| loss_kl | 0.09064759 |
| loss_pol_entpen | 0.0 |
| loss_pol_surr | -0.048767302 |
| loss_vf_loss | 3.8620138 |
----------------------------------
for iteration 1600, then for iteration 1 of the new trial (which ostensibly uses iteration 1600's parameters as its starting point) I get something like
----------------------------------
| EpLenMean | 2.12 |
| EpRewMean | 0.486 |
| EpThisIter | 7676 |
| EpisodesSoFar | 7676 |
| TimeElapsed | 12.3 |
| TimestepsSoFar | 16381 |
| ev_tdlam_before | -4.47 |
| loss_ent | 45.355236 |
| loss_kl | 0.016298374 |
| loss_pol_entpen | 0.0 |
| loss_pol_surr | -0.039200217 |
| loss_vf_loss | 0.043219414 |
----------------------------------
which is right back at square one (i.e. where my model trains from scratch).
The funny thing is that I know the model is at least being saved properly, since I can actually replay it using eval.py:
from baselines.common import tf_util as U
from baselines.ppo1 import mlp_policy, pposgd_simple
from visak_dartdeepmimic import VisakDartDeepMimicArgParse
import numpy as np
import tensorflow as tf

class PolicyLoaderAgent(object):
    """The world's simplest agent!"""
    def __init__(self, param_path, obs_space, action_space):
        self.action_space = action_space

        self.actor = mlp_policy.MlpPolicy("pi", obs_space, action_space,
                                          hid_size=64, num_hid_layers=2)
        U.initialize()
        saver = tf.train.Saver()
        saver.restore(tf.get_default_session(), param_path)

    def act(self, observation, reward, done):
        action2, unknown = self.actor.act(False, observation)
        return action2


if __name__ == "__main__":
    parser = VisakDartDeepMimicArgParse()
    parser.add_argument("--params-prefix", required=True, type=str)
    args = parser.parse_args()
    env = parser.get_env()

    U.make_session(num_cpu=1).__enter__()
    U.initialize()

    agent = PolicyLoaderAgent(args.params_prefix, env.observation_space, env.action_space)

    while True:
        ob = env.reset(0, pos_stdv=0, vel_stdv=0)
        # reward must exist before the first call to agent.act
        reward, done = 0, False
        while not done:
            action = agent.act(ob, reward, done)
            ob, reward, done, _ = env.step(action)
            env.render()
and I can clearly see that it has learned something compared to an untrained baseline. The loading operation is identical across both files (or rather, if there is a mistake there, I can't find it), so it seems to me that train.py is probably loading the model correctly and then, because of something in the pposgd_simple.learn function, promptly forgetting it.
Could anyone shed some light on this situation?
Not sure if this is still relevant, since the baselines repository has changed quite a bit since this question was posted, but it seems that you don't actually initialize the variables before restoring them. Try moving the call to U.initialize() inside policy_fn:
def policy_fn(name, ob_space, ac_space):
    print("Policy with name: ", name)
    policy = mlp_policy.MlpPolicy(name=name, ob_space=ob_space,
                                  ac_space=ac_space, hid_size=64, num_hid_layers=2)
    saver = tf.train.Saver()
    if initial_params_path is not None:
        print("Tried to restore from ", initial_params_path)
        U.initialize()
        saver.restore(tf.get_default_session(), initial_params_path)
    return policy
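The reason the order matters (paraphrasing baselines' tf_util from memory, so treat this as a sketch rather than the exact source): initialize() only runs the initializer for variables it hasn't seen before, tracked in a module-level set, and pposgd_simple.learn calls U.initialize() itself after building the policy networks. If the policy variables never went through U.initialize() before saver.restore, that later call inside learn treats them as new and re-initializes them, wiping out the restored weights; calling U.initialize() inside policy_fn first marks them as already initialized, so the restored values survive.

    # Rough shape of baselines.common.tf_util.initialize (paraphrased; the exact
    # code in the repository may differ):
    ALREADY_INITIALIZED = set()

    def initialize():
        # Only variables that have not been initialized through this function
        # before are run through their initializer.
        new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
        tf.get_default_session().run(tf.variables_initializer(new_variables))
        ALREADY_INITIALIZED.update(new_variables)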