Search code examples
python · tensorflow · keras · reinforcement-learning

Keras: AttributeError: 'Adam' object has no attribute '_name'


I want to compile my DQN agent, but I get this error: AttributeError: 'Adam' object has no attribute '_name'.

DQN = buildAgent(model, actions)
DQN.compile(Adam(lr=1e-3), metrics=['mae'])

I tried adding a fake `_name` attribute, but it doesn't work. I'm following a tutorial, and the same code works on the tutor's machine, so this is probably caused by a recent library update — how do I fix it?

Here is my full code:

from keras.layers import Dense, Flatten
import gym
from keras.optimizer_v1 import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory

env = gym.make('CartPole-v0')
states = env.observation_space.shape[0]  # length of the observation vector
actions = env.action_space.n  # number of discrete actions

episodes = 10  # NOTE(review): defined but never used anywhere below

def buildModel(statez, actiones):
    """Build a small feed-forward Q-network: flatten -> 24 -> 24 -> one linear output per action."""
    # NOTE(review): `Sequential` is never imported in this snippet, so as written
    # this line raises NameError before the Adam error is ever reached —
    # presumably `from keras.models import Sequential` was intended.
    model = Sequential()
    model.add(Flatten(input_shape=(1, statez)))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(24, activation='relu'))
    model.add(Dense(actiones, activation='linear'))
    return model

model = buildModel(states, actions)  # Q-network handed to the DQN agent below

def buildAgent(modell, actionz):
    """Wrap `modell` in a keras-rl DQNAgent using Boltzmann exploration and a replay memory."""
    policy = BoltzmannQPolicy()
    memory = SequentialMemory(limit=50000, window_length=1)
    dqn = DQNAgent(model=modell, memory=memory, policy=policy, nb_actions=actionz, nb_steps_warmup=10, target_model_update=1e-2)
    return dqn

DQN = buildAgent(model, actions)
# This is the failing line from the question: `Adam` was imported from
# keras.optimizer_v1 (the legacy v1 optimizer class), which keras-rl2's
# compile() does not accept — hence the missing `_name` attribute.
# `lr` is also the deprecated spelling of `learning_rate`.
DQN.compile(Adam(lr=1e-3), metrics=['mae'])
DQN.fit(env, nb_steps=50000, visualize=False, verbose=1)

Solution

  • Your error comes from importing Adam with `from keras.optimizer_v1 import Adam`. You can solve the problem by using `tf.keras.optimizers.Adam` from TensorFlow >= 2, as shown below:

    (The lr argument is deprecated, it's better to use learning_rate instead.)

    # !pip install keras-rl2
    import tensorflow as tf
    from keras.layers import Dense, Flatten
    import gym
    from rl.agents.dqn import DQNAgent
    from rl.policy import BoltzmannQPolicy
    from rl.memory import SequentialMemory
    
    env = gym.make('CartPole-v0')
    states = env.observation_space.shape[0]  # length of the observation vector
    actions = env.action_space.n  # number of discrete actions
    episodes = 10  # NOTE(review): unused in this script
    
    def buildModel(statez, actiones):
        """Return a small fully-connected Q-network.

        Architecture: Flatten(1, statez) -> Dense(24, relu) -> Dense(24, relu)
        -> Dense(actiones, linear), one linear output per action.
        """
        layers = [
            Flatten(input_shape=(1, statez)),
            Dense(24, activation='relu'),
            Dense(24, activation='relu'),
            Dense(actiones, activation='linear'),
        ]
        return tf.keras.Sequential(layers)
    
    def buildAgent(modell, actionz):
        """Assemble a keras-rl DQNAgent around `modell`.

        Uses Boltzmann exploration, a 50k-step replay memory with a
        window of 1, 10 warm-up steps, and soft target updates (1e-2).
        """
        agent_config = dict(
            model=modell,
            nb_actions=actionz,
            policy=BoltzmannQPolicy(),
            memory=SequentialMemory(limit=50000, window_length=1),
            nb_steps_warmup=10,
            target_model_update=1e-2,
        )
        return DQNAgent(**agent_config)
    
    model = buildModel(states, actions)
    DQN = buildAgent(model, actions)
    # Use the TF2 optimizer class and the `learning_rate` keyword
    # (`lr` is the deprecated spelling).
    DQN.compile(tf.keras.optimizers.Adam(learning_rate=1e-3), metrics=['mae'])
    DQN.fit(env, nb_steps=50000, visualize=False, verbose=1)