Issues applying Q-learning with a custom environment (Python, reinforcement learning, OpenAI)

I am trying to apply Q-learning to my custom reinforcement learning environment, which represents energy storage arbitrage (trading electricity with a battery: charging when prices are low and discharging when they rise). The environment works, but I have not been able to apply Q-learning to it. Below the environment is a script that runs it, but I am not sure what the state variable should be. Any ideas on how to apply Q-learning to optimize the charge/discharge cycles? The reset function starts the next day from a dataset of hourly electricity prices. A picture of the dataframe is below.

import gym
import numpy as np
from gym import spaces

class BatteryEnv(gym.Env):

    def __init__(self, df):

        self.dict_actions = {0:'discharge', 1:'charge', 2:'wait'}
        self.df = df
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(low=0, high=100, shape=(1,1))

        self.reward_list = []
        self.actual_load_list = [] #observations
        self.SOE_list = [] #state of energy

        self.state_idx = 0 #iteration (hour of the day)
        self.SOE = 0 #state of energy
        self.MAX_charge = 20 #maximum charge/discharge per hour (roughly a C-rate)
        self.Capacity = 100

    def step(self, action):
        #mapping integer to action for actual load calculation
        str_action = self.dict_actions[action]

        #increase state idx within episode (1 = 1 hour)
        self.state_idx += 1

        #calculating our actual load
        if str_action == 'charge' and self.SOE < self.Capacity:
            SOE_charge = np.clip(self.Capacity - self.SOE, 0, self.MAX_charge)
            self.SOE += SOE_charge
            obs = SOE_charge * self.df['prices'][self.state_idx]

        elif str_action == 'discharge' and self.SOE > 0:
            SOE_discharge = np.clip(self.SOE, 0, self.MAX_charge)
            self.SOE -= SOE_discharge
            obs = -SOE_discharge * self.df['prices'][self.state_idx]

        else:
            self.SOE += 0
            obs = 0 * self.df['prices'][self.state_idx]

        #appending actual load to list for monitoring and comparison purposes
        self.actual_load_list.append(obs)
        self.SOE_list.append(self.SOE)

        #reward system: a positive observation means we are spending money, a negative one means we are earning
        if obs < 0:
            reward = 1
        else:
            reward = -1

        #appending current reward to list for monitoring and comparison purposes
        self.reward_list.append(reward)

        #checking whether our episode (day interval) ends
        if self.df.iloc[self.state_idx, :].Daynum != self.df.iloc[self.state_idx - 1].Daynum:
            done = True
        else:
            done = False

        return obs, reward, done

    def reset(self):
        #return the current row of the stored dataframe (state_idx is already at the next day)
        return self.df.iloc[self.state_idx, :]

    def render(self):
        pass

The code below shows that the environment works correctly.

for episode in range(7):
    observation = env.reset()
    for t in range(24): #can't be smaller than 24, as 24 time points equal 1 episode (1 day)
        #print(observation)
        action = env.action_space.sample() #random actions
        observation, reward, done = env.step(action)
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            print(observation)
            print(reward)
            break
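
On the question of what the state variable should be: tabular Q-learning needs a small discrete state, and for this problem a natural choice is the pair (hour of day, discretized state of energy), since the price pattern repeats daily and the battery level is the only quantity the agent controls. Below is a minimal sketch of that idea, not a drop-in for the environment above; n_soe_buckets and encode_state are names made up for illustration.

import numpy as np

n_soe_buckets = 10   # assumption: discretize SOE (0..Capacity) into 10 buckets
capacity = 100

def encode_state(hour, soe):
    """Map (hour of day, state of energy) to a discrete (hour, bucket) pair."""
    soe_bucket = min(int(soe / capacity * n_soe_buckets), n_soe_buckets - 1)
    return hour % 24, soe_bucket

# Q-table indexed by hour of day, SOE bucket and action (discharge/charge/wait)
q_table = np.zeros((24, n_soe_buckets, 3))

# example: hour 17 with SOE 60 lands in state (17, 6)
state = encode_state(17, 60)
print(state, q_table[state].shape)   # -> (17, 6) (3,)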

I think I was able to get the code working with Q-learning. However, the reward and reset functions need some work to perform better.

class BatteryEnv(gym.Env):

    def __init__(self, prices=np.array(df.prices), daynum=np.array(df.Daynum)):

        #self.df = df

        self.prices = prices
        self.daynum = daynum

        self.dict_actions = {0:'discharge', 1:'charge', 2:'wait'}

        self.action_space = spaces.Discrete(3)

        # our observation space is just one float value - our load
        self.observation_space = spaces.Box(low=0, high=100, shape=(1,1))

        # reward list for monitoring
        self.reward_list = []

        # lists for monitoring
        self.actual_load_list = []
        self.SOE_list = [] #state of energy
        self.chargio = [] #charge & discharge
        self.SOEe = [] #state of energy

        # index of current state within current episode
        self.state_idx = 0 #iteration
        self.SOE = 0 #state of energy
        self.MAX_charge = 20 #maximum charge/discharge per hour (roughly a C-rate)
        self.Capacity = 100

        self.state = 0

    def step(self, action):
        #mapping integer to action for actual load calculation
        str_action = self.dict_actions[action]

        #increase state idx within episode (day)
        self.state_idx += 1

        #calculating our actual load; self.SOE is updated so the charge/discharge conditions change over time
        if str_action == 'charge' and self.SOE < self.Capacity:
            SOE_charge = np.clip(self.Capacity - self.SOE, 0, self.MAX_charge)
            self.SOE += SOE_charge
            self.SOEe.append(self.SOE)
            self.chargio.append(SOE_charge)
            obs = SOE_charge * self.prices[self.state_idx]

        elif str_action == 'discharge' and self.SOE > 0:
            SOE_discharge = np.clip(self.SOE, 0, self.MAX_charge)
            self.SOE -= SOE_discharge
            self.SOEe.append(self.SOE)
            self.chargio.append(-SOE_discharge)
            obs = -SOE_discharge * self.prices[self.state_idx]

        else:
            self.SOE += 0
            self.chargio.append(0)
            self.SOEe.append(self.SOE)
            obs = 0

        # appending actual load to list for monitoring and comparison purposes
        self.actual_load_list.append(obs)
        self.SOE_list.append(self.SOE)

        #reward system: a positive observation means we are spending money, a negative one means we are earning
        if obs < 0:
            reward = 1
        else:
            reward = -1

        # appending current reward to list for monitoring and comparison purposes
        self.reward_list.append(reward)

        #checking whether our episode (day interval) ends
        if self.daynum[self.state_idx] != self.daynum[self.state_idx - 1]:
            done = True
        else:
            done = False

        info = {
            #'step': self.state_idx,
            'SOE': self.SOE,
            #'reward': reward,
            'chargio': self.chargio
        }

        return obs, reward, done, info

    def reset(self):
        self.state = 0
        return self.state

    def render(self):
        pass
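
Since the flat +1/-1 reward throws away how much money each trade actually makes or costs, one option (only a sketch, not part of the class above) is to reward the signed revenue directly, and to have reset clear the battery and jump to the first hour of the next day. revenue_reward and next_day_start below are hypothetical helper names, and the snippet assumes the same prices/daynum arrays the class already uses.

import numpy as np

def revenue_reward(obs):
    """Reward = money earned this hour (obs is positive when buying, negative when selling)."""
    return -obs  # earn when discharging at high prices, pay when charging

def next_day_start(daynum, state_idx):
    """Index of the first hour of the day after the one containing state_idx."""
    current_day = daynum[state_idx]
    later = np.where(daynum != current_day)[0]
    later = later[later > state_idx]
    return int(later[0]) if len(later) > 0 else 0  # wrap around to the start of the data

# inside reset() one could then do (sketch):
#   self.state_idx = next_day_start(self.daynum, self.state_idx)
#   self.SOE = 0
#   self.state = 0
#   return self.state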

Applying Q-learning:

env = BatteryEnv()
env.reset()
discrete_os_size = [20] * len(env.observation_space.high)
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / discrete_os_size
discrete_os_win_size #20 buckets, each 5 units wide
learning_rate = 0.1
discount = 0.95 #measure of how important future rewards are
episodes = 25000
q_table = np.random.uniform(low=-2, high=2, size=(discrete_os_size + [env.action_space.n]))

def get_discrete_state(state): #change SOE for other states
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    return tuple(discrete_state.astype(int))

discrete_state = get_discrete_state(env.reset())

SOE = []

for episode in range(episodes):
    if episode % 5000 == 0:
        print(episode)

    discrete_state = get_discrete_state(env.reset())
    done = False
    while not done:
        action = np.argmax(q_table[discrete_state])
        new_state, reward, done, _ = env.step(action)

        new_discrete_state = get_discrete_state(new_state)

        if not done:
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state + (action,)]
            new_q = (1 - learning_rate) * current_q + learning_rate * (reward + discount * max_future_q)
            q_table[discrete_state + (action,)] = new_q

        #elif new_state[0] >= env.go:

        discrete_state = new_discrete_state

        SOE.append(new_state)

        print(reward, new_state)
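
One thing to watch in the loop above: the action is always np.argmax(q_table[discrete_state]), so the agent never explores and the random initial Q-values can lock it into a single action per state. A common remedy is epsilon-greedy action selection with a decaying epsilon; the sketch below shows how it could slot into the loop (the epsilon schedule values are assumptions, not taken from the post).

import numpy as np

epsilon = 1.0                 # assumption: start fully random
epsilon_min = 0.05
epsilon_decay = 0.9995        # per-episode decay

def choose_action(q_table, discrete_state, epsilon, n_actions=3):
    """Epsilon-greedy: random action with probability epsilon, greedy otherwise."""
    if np.random.random() < epsilon:
        return np.random.randint(n_actions)
    return int(np.argmax(q_table[discrete_state]))

# inside the episode loop, instead of the plain argmax:
#   action = choose_action(q_table, discrete_state, epsilon)
# and after each episode:
#   epsilon = max(epsilon_min, epsilon * epsilon_decay)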