what does "IndexError: index 20 is out of bounds for axis 1 with size 20"
what does "IndexError: index 20 is out of bounds for axis 1 with size 20"
I'm doing Q-learning in a maze environment. It runs fine in the initial stage, but after that I get the following:

max_future_q = np.max(q_table[new_discrete_state])
IndexError: index 20 is out of bounds for axis 1 with size 20

I don't understand what the problem is. The code is below:
import gym
import numpy as np
import gym_maze

env = gym.make("maze-v0")

LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 25000
SHOW_EVERY = 3000

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES // 2
epsilon_decay_value = epsilon / (END_EPSILON_DECAYING - START_EPSILON_DECAYING)

q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))

def get_discrete_state(state):
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    # we use this tuple to look up the Q values for the available actions in the q-table
    return tuple(discrete_state.astype(int))

for episode in range(EPISODES):
    discrete_state = get_discrete_state(env.reset())
    done = False

    if episode % SHOW_EVERY == 0:
        render = True
        print(episode)
    else:
        render = False

    while not done:
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(q_table[discrete_state])
        else:
            # Get random action
            action = np.random.randint(0, env.action_space.n)

        new_state, reward, done, _ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)

        if episode % SHOW_EVERY == 0:
            env.render()

        # If simulation did not end yet after last step - update Q table
        if not done:
            # Maximum possible Q value in next step (for new state)
            max_future_q = np.max(q_table[new_discrete_state])
            # Current Q value (for current state and performed action)
            current_q = q_table[discrete_state + (action,)]
            # And here's our equation for a new Q value for current state and action
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
            # Update Q table with new Q value
            q_table[discrete_state + (action,)] = new_q
        # Simulation ended (for any reason) - if goal position is achieved, update Q value directly
        elif new_state[0] >= env.goal_position:
            #q_table[discrete_state + (action,)] = reward
            q_table[discrete_state + (action,)] = 0

        discrete_state = new_discrete_state

    # Decaying is done every episode if the episode number is within the decaying range
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value

env.close()
The error means that you are trying to index axis 1, which has size 20, with the index 20, which is one past the end. For an array of shape (n, 20), for example, np.zeros((10, 20))[:, 20] raises exactly this error, because the valid indices along axis 1 are 0 through 19. Try verifying the shape of the np array against the indices you are using.
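To make this concrete, here is a minimal standalone snippet (separate from the question's code) that reproduces the same IndexError and shows the shape check suggested above:

import numpy as np

a = np.zeros((10, 20))
print(a.shape)   # (10, 20): axis 1 has size 20, so valid indices are 0..19
print(a[:, 19])  # fine, this is the last column
print(a[:, 20])  # IndexError: index 20 is out of bounds for axis 1 with size 20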
An index-out-of-bounds error means you are trying to access an item at an index that does not exist in the container. You cannot select the sixth person in a row of five.

Python, like most programming languages, is zero-indexed. That means the first item in a container has index 0, not 1. The items in a container of size 5 therefore have the indices

0, 1, 2, 3, 4

As you can see, the index of the last item in a container is one less than the container's size. In Python, you can get the index of the last item in a container with

len(foo) - 1
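Applied to the question's code, that is exactly what happens: get_discrete_state divides by discrete_os_win_size, so an observation lying exactly on env.observation_space.high maps to bin 20, one past the last valid index 19. A minimal sketch of a fix is to clip the computed indices into range (the low/high bounds below are made-up placeholders standing in for the real maze bounds):

import numpy as np

DISCRETE_OS_SIZE = np.array([20, 20])
os_low = np.array([0.0, 0.0])     # placeholder for env.observation_space.low
os_high = np.array([10.0, 10.0])  # placeholder for env.observation_space.high
discrete_os_win_size = (os_high - os_low) / DISCRETE_OS_SIZE

def get_discrete_state(state):
    discrete_state = (state - os_low) / discrete_os_win_size
    # Clip each index into 0..19 so a state sitting exactly on the upper
    # boundary maps into the last bin instead of one past it.
    return tuple(np.clip(discrete_state.astype(int), 0, DISCRETE_OS_SIZE - 1))

print(get_discrete_state(np.array([10.0, 10.0])))  # indices clipped to 19, not 20

Any equivalent guard works, for example min(index, size - 1) per coordinate; the point is that the discretization must never produce an index equal to the number of bins.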