Keras: AttributeError: 'Adam' object has no attribute '_name'
我想编译我的 DQN 代理,但出现错误:
AttributeError: 'Adam' object has no attribute '_name'
,
DQN = buildAgent(model, actions)
DQN.compile(Adam(lr=1e-3), metrics=['mae'])
我尝试添加假 _name
但它不起作用,我正在学习教程并且它在导师的机器上工作,它可能是一些新的更新更改但如何解决这个问题
这是我的完整代码:
from keras.layers import Dense, Flatten
import gym
from keras.optimizer_v1 import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
# Set up the classic CartPole control task and read its dimensions.
env = gym.make('CartPole-v0')
states = env.observation_space.shape[0]  # size of the observation vector (4 for CartPole)
actions = env.action_space.n  # number of discrete actions (2: push left / push right)
episodes = 10  # NOTE(review): defined but never used in this snippet
def buildModel(statez, actiones):
    """Build a small MLP Q-network.

    The (1, statez) observation window is flattened, passed through two
    24-unit ReLU hidden layers, and projected to one linear output per action
    (Q-values, so no squashing activation).
    """
    # NOTE(review): `Sequential` is never imported in this snippet
    # (e.g. `from keras.models import Sequential`) — confirm against the full file.
    model = Sequential()
    model.add(Flatten(input_shape=(1, statez)))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(actiones, activation='linear'))
    return model

model = buildModel(states, actions)
def buildAgent(modell, actionz):
    """Wrap the Q-network in a keras-rl DQNAgent with Boltzmann exploration
    and a 50k-transition sliding replay memory."""
    policy = BoltzmannQPolicy()
    memory = SequentialMemory(limit=50000, window_length=1)
    dqn = DQNAgent(model=modell, memory=memory, policy=policy, nb_actions=actionz, nb_steps_warmup=10, target_model_update=1e-2)
    return dqn

DQN = buildAgent(model, actions)
# NOTE(review): this line raises the reported error — the Adam imported from
# `keras.optimizer_v1` is the legacy v1 optimizer class, which lacks the
# `_name` attribute keras-rl2 expects; `lr` is also a deprecated argument name
# (use `learning_rate`). See the answer below for the fix.
DQN.compile(Adam(lr=1e-3), metrics=['mae'])
DQN.fit(env, nb_steps=50000, visualize=False, verbose=1)
您的错误来自使用 from keras.optimizer_v1 import Adam
导入 Adam
,您可以使用 TensorFlow >= v2
中的 tf.keras.optimizers.Adam
解决您的问题,如下所示:
(不推荐使用 lr
参数,最好使用 learning_rate
代替。)
# !pip install keras-rl2
import tensorflow as tf
from keras.layers import Dense, Flatten
import gym
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
# Set up the CartPole environment and read its dimensions.
env = gym.make('CartPole-v0')
states = env.observation_space.shape[0]  # observation vector size (4 for CartPole)
actions = env.action_space.n  # number of discrete actions (2)
episodes = 10  # NOTE(review): defined but never used below
def buildModel(statez, actiones):
    """Build the Q-network for the DQN agent.

    The (1, statez) observation window is flattened, passed through two
    24-unit ReLU hidden layers, and projected to one linear output unit per
    action (raw Q-values).

    Args:
        statez: size of the environment's observation vector.
        actiones: number of discrete actions (output units).

    Returns:
        An uncompiled ``tf.keras.Sequential`` model (keras-rl compiles it
        together with the agent).
    """
    # Use tf.keras layers throughout: mixing standalone `keras.layers` with
    # `tf.keras.Sequential` is exactly the keras/tf.keras version mismatch
    # that produces errors like "'Adam' object has no attribute '_name'".
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(1, statez)),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dense(actiones, activation='linear'),
    ])
    return model
def buildAgent(modell, actionz):
    """Assemble a DQN agent around the given Q-network.

    Exploration uses a Boltzmann (softmax-over-Q) policy; experience is kept
    in a 50k-transition sliding replay memory, and the target network is
    soft-updated with tau = 1e-2.
    """
    exploration = BoltzmannQPolicy()
    replay_buffer = SequentialMemory(limit=50000, window_length=1)
    return DQNAgent(
        model=modell,
        nb_actions=actionz,
        memory=replay_buffer,
        policy=exploration,
        nb_steps_warmup=10,
        target_model_update=1e-2,
    )
# Build the network and agent, then train for 50k environment steps.
model = buildModel(states, actions)
DQN = buildAgent(model, actions)
# tf.keras.optimizers.Adam (with `learning_rate`, not the deprecated `lr`)
# is what resolves the "'Adam' object has no attribute '_name'" error.
DQN.compile(tf.keras.optimizers.Adam(learning_rate=1e-3), metrics=['mae'])
DQN.fit(env, nb_steps=50000, visualize=False, verbose=1)
我想编译我的 DQN 代理,但出现错误:
AttributeError: 'Adam' object has no attribute '_name'
,
DQN = buildAgent(model, actions)
DQN.compile(Adam(lr=1e-3), metrics=['mae'])
我尝试添加假 _name
但它不起作用,我正在学习教程并且它在导师的机器上工作,它可能是一些新的更新更改但如何解决这个问题
这是我的完整代码:
from keras.layers import Dense, Flatten
import gym
from keras.optimizer_v1 import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
# Set up the classic CartPole control task and read its dimensions.
env = gym.make('CartPole-v0')
states = env.observation_space.shape[0]  # size of the observation vector (4 for CartPole)
actions = env.action_space.n  # number of discrete actions (2: push left / push right)
episodes = 10  # NOTE(review): defined but never used in this snippet
def buildModel(statez, actiones):
    """Build a small MLP Q-network.

    The (1, statez) observation window is flattened, passed through two
    24-unit ReLU hidden layers, and projected to one linear output per action
    (Q-values, so no squashing activation).
    """
    # NOTE(review): `Sequential` is never imported in this snippet
    # (e.g. `from keras.models import Sequential`) — confirm against the full file.
    model = Sequential()
    model.add(Flatten(input_shape=(1, statez)))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(actiones, activation='linear'))
    return model

model = buildModel(states, actions)
def buildAgent(modell, actionz):
    """Wrap the Q-network in a keras-rl DQNAgent with Boltzmann exploration
    and a 50k-transition sliding replay memory."""
    policy = BoltzmannQPolicy()
    memory = SequentialMemory(limit=50000, window_length=1)
    dqn = DQNAgent(model=modell, memory=memory, policy=policy, nb_actions=actionz, nb_steps_warmup=10, target_model_update=1e-2)
    return dqn

DQN = buildAgent(model, actions)
# NOTE(review): this line raises the reported error — the Adam imported from
# `keras.optimizer_v1` is the legacy v1 optimizer class, which lacks the
# `_name` attribute keras-rl2 expects; `lr` is also a deprecated argument name
# (use `learning_rate`). See the answer below for the fix.
DQN.compile(Adam(lr=1e-3), metrics=['mae'])
DQN.fit(env, nb_steps=50000, visualize=False, verbose=1)
您的错误来自使用 from keras.optimizer_v1 import Adam
导入 Adam
,您可以使用 TensorFlow >= v2
中的 tf.keras.optimizers.Adam
解决您的问题,如下所示:
(不推荐使用 lr
参数,最好使用 learning_rate
代替。)
# !pip install keras-rl2
import tensorflow as tf
from keras.layers import Dense, Flatten
import gym
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
# Set up the CartPole environment and read its dimensions.
env = gym.make('CartPole-v0')
states = env.observation_space.shape[0]  # observation vector size (4 for CartPole)
actions = env.action_space.n  # number of discrete actions (2)
episodes = 10  # NOTE(review): defined but never used below
def buildModel(statez, actiones):
    """Build the Q-network for the DQN agent.

    The (1, statez) observation window is flattened, passed through two
    24-unit ReLU hidden layers, and projected to one linear output unit per
    action (raw Q-values).

    Args:
        statez: size of the environment's observation vector.
        actiones: number of discrete actions (output units).

    Returns:
        An uncompiled ``tf.keras.Sequential`` model (keras-rl compiles it
        together with the agent).
    """
    # Use tf.keras layers throughout: mixing standalone `keras.layers` with
    # `tf.keras.Sequential` is exactly the keras/tf.keras version mismatch
    # that produces errors like "'Adam' object has no attribute '_name'".
    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(1, statez)),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dense(actiones, activation='linear'),
    ])
    return model
def buildAgent(modell, actionz):
    """Assemble a DQN agent around the given Q-network.

    Exploration uses a Boltzmann (softmax-over-Q) policy; experience is kept
    in a 50k-transition sliding replay memory, and the target network is
    soft-updated with tau = 1e-2.
    """
    exploration = BoltzmannQPolicy()
    replay_buffer = SequentialMemory(limit=50000, window_length=1)
    return DQNAgent(
        model=modell,
        nb_actions=actionz,
        memory=replay_buffer,
        policy=exploration,
        nb_steps_warmup=10,
        target_model_update=1e-2,
    )
# Build the network and agent, then train for 50k environment steps.
model = buildModel(states, actions)
DQN = buildAgent(model, actions)
# tf.keras.optimizers.Adam (with `learning_rate`, not the deprecated `lr`)
# is what resolves the "'Adam' object has no attribute '_name'" error.
DQN.compile(tf.keras.optimizers.Adam(learning_rate=1e-3), metrics=['mae'])
DQN.fit(env, nb_steps=50000, visualize=False, verbose=1)