python · reinforcement-learning · openai-api

Issues applying Q-learning with a custom environment (Python, reinforcement learning, OpenAI)


I am trying to apply Q-learning to my custom reinforcement learning environment, which represents energy storage arbitrage (electricity trading with a battery: charge when prices are low and discharge when prices are high). The environment works, but I am not able to apply Q-learning to it. Below the environment is a script that runs it, but I am unsure what I should use as the state variable. Any ideas on how to apply Q-learning to optimize the charge/discharge cycles? The reset function starts the next day from a dataset with hourly electricity prices; a description of the dataframe is below.

import gym
import numpy as np
from gym import spaces


class BatteryEnv(gym.Env):

    def __init__(self, df):
        self.dict_actions = {0: 'discharge', 1: 'charge', 2: 'wait'}
        self.df = df
        self.action_space = spaces.Discrete(3)
        self.observation_space = spaces.Box(low=0, high=100, shape=(1, 1))

        self.reward_list = []
        self.actual_load_list = []  # observations
        self.SOE_list = []  # state of energy

        self.state_idx = 0  # iteration (hour of the day)
        self.SOE = 0  # state of energy
        self.MAX_charge = 20  # maximum energy charged/discharged per hour (C-rate, roughly)
        self.Capacity = 100

    def step(self, action):
        # mapping integer to action for actual load calculation
        str_action = self.dict_actions[action]

        # increase state idx within episode (1 = 1 hour)
        self.state_idx += 1

        # calculating our actual load
        if str_action == 'charge' and self.SOE < self.Capacity:
            SOE_charge = np.clip(self.Capacity - self.SOE, 0, self.MAX_charge)
            self.SOE += SOE_charge
            obs = SOE_charge * self.df['prices'][self.state_idx]

        elif str_action == 'discharge' and self.SOE > 0:
            SOE_discharge = np.clip(self.SOE, 0, self.MAX_charge)
            self.SOE -= SOE_discharge
            obs = -SOE_discharge * self.df['prices'][self.state_idx]

        else:
            self.SOE += 0
            obs = 0 * self.df['prices'][self.state_idx]

        # appending actual load to list for monitoring and comparison purposes
        self.actual_load_list.append(obs)
        self.SOE_list.append(self.SOE)

        # reward system
        if obs < 0:  # a positive observation means we are spending money, a negative one means we are earning
            reward = 1
        else:
            reward = -1

        # appending current reward to list for monitoring and comparison purposes
        self.reward_list.append(reward)

        # checking whether our episode (day interval) ends
        if self.df.iloc[self.state_idx, :].Daynum != self.df.iloc[self.state_idx - 1].Daynum:
            done = True
        else:
            done = False

        return obs, reward, done

    def reset(self):
        return self.df.iloc[self.state_idx, :]

    def render(self):
        pass

The code below shows that the environment is working.

env = BatteryEnv(df)

for episode in range(7):
    observation = env.reset()
    for t in range(24):  # can't be smaller than 24, as 24 time points equal 1 episode (1 day)
        # print(observation)
        action = env.action_space.sample()  # random actions
        observation, reward, done = env.step(action)
        if done:
            print("Episode finished after {} timesteps".format(t + 1))
            print(observation)
            print(reward)
            break

Each timestep is one hour, prices is the electricity price for that hour, and Daynum is the day number out of 365.
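For illustration, a minimal mock of this dataframe (column names prices and Daynum as used in the code above; the price values here are made up) could be built like this:

import numpy as np
import pandas as pd

# Hypothetical stand-in for the real dataset: 365 days x 24 hourly prices.
# The actual prices come from the real data; these are random placeholders.
rng = np.random.default_rng(0)
hours = 365 * 24
df = pd.DataFrame({
    'prices': rng.uniform(10, 80, size=hours),    # hourly electricity prices (made up)
    'Daynum': np.repeat(np.arange(1, 366), 24),   # day index 1..365, repeated for each hour
})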


Solution

  • I think I was able to make the code sort of work with Q-learning. However, the reward and reset functions need some work to perform better; a possible adjustment is sketched after the class below.

    class BatteryEnv(gym.Env):

        # note: df must already exist in the global scope, since its columns are used as defaults
        def __init__(self, prices=np.array(df.prices), daynum=np.array(df.Daynum)):

            self.prices = prices
            self.daynum = daynum

            self.dict_actions = {0: 'discharge', 1: 'charge', 2: 'wait'}

            self.action_space = spaces.Discrete(3)

            # our observation space is just one float value - our load
            self.observation_space = spaces.Box(low=0, high=100, shape=(1, 1))

            # reward list for monitoring
            self.reward_list = []

            # lists for monitoring
            self.actual_load_list = []
            self.SOE_list = []  # state of energy
            self.chargio = []   # charge & discharge
            self.SOEe = []      # state of energy

            # index of current state within current episode
            self.state_idx = 0  # iteration
            self.SOE = 0        # state of energy
            self.MAX_charge = 20  # maximum energy charged/discharged per hour (C-rate, roughly)
            self.Capacity = 100

            self.state = 0

        def step(self, action):
            # mapping integer to action for actual load calculation
            str_action = self.dict_actions[action]

            # increase state idx within episode (1 = 1 hour)
            self.state_idx += 1

            # calculating our actual load
            if str_action == 'charge' and self.SOE < self.Capacity:
                SOE_charge = np.clip(self.Capacity - self.SOE, 0, self.MAX_charge)
                self.SOE += SOE_charge   # keep SOE in sync so the charge/discharge conditions still work
                self.state += SOE_charge
                self.SOEe.append(self.SOE)
                self.chargio.append(SOE_charge)
                obs = SOE_charge * self.prices[self.state_idx]

            elif str_action == 'discharge' and self.SOE > 0:
                SOE_discharge = np.clip(self.SOE, 0, self.MAX_charge)
                self.SOE -= SOE_discharge  # keep SOE in sync so the charge/discharge conditions still work
                self.state -= SOE_discharge
                self.SOEe.append(self.SOE)
                self.chargio.append(-SOE_discharge)
                obs = -SOE_discharge * self.prices[self.state_idx]

            else:
                self.state += 0
                self.chargio.append(0)
                self.SOEe.append(self.SOE)
                obs = 0

            # appending actual load to list for monitoring and comparison purposes
            self.actual_load_list.append(obs)
            self.SOE_list.append(self.SOE)

            # reward system
            if obs < 0:  # a positive observation means we are spending money, a negative one means we are earning
                reward = 1
            else:
                reward = -1

            # appending current reward to list for monitoring and comparison purposes
            self.reward_list.append(reward)

            # checking whether our episode (day interval) ends
            if self.daynum[self.state_idx] != self.daynum[self.state_idx - 1]:
                done = True
            else:
                done = False

            info = {
                # 'step': self.state_idx,
                'SOE': self.SOE,
                # 'reward': reward,
                'chargio': self.chargio
            }

            return obs, reward, done, info

        def reset(self):
            self.state = 0
            return self.state

        def render(self):
            pass
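
    The ±1 reward does not distinguish a large profit from a small one, and the posted reset() only zeroes self.state. One possible adjustment (a sketch under the assumptions of the class above, not code from the original post) is to reward the actual profit of each hour and to clear the battery level on reset:

    # Sketch only: inside step(), after obs is computed, reward the profit directly.
    # obs is the money spent this hour (positive = buying, negative = selling),
    # so -obs is the profit earned by the action.
    reward = -obs

    # Sketch only: a reset that also empties the battery at the start of each day.
    def reset(self):
        self.SOE = 0
        self.state = 0
        # state_idx is intentionally not reset, so the next episode continues
        # with the next day of the price series, as described in the question
        return self.state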
    

    Applying Q-learning:

    env = BatteryEnv()
    env.reset()

    discrete_os_size = [20] * len(env.observation_space.high)
    discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / discrete_os_size
    discrete_os_win_size  # width of each bucket (here (100 - 0) / 20 = 5)

    learning_rate = 0.1
    discount = 0.95  # measure of how important future rewards are
    episodes = 25000

    q_table = np.random.uniform(low=-2, high=2, size=(discrete_os_size + [env.action_space.n]))

    def get_discrete_state(state):  # change SOE for other states
        # note: obs can fall outside the 0-100 range of the observation space,
        # which can push the resulting index out of bounds
        discrete_state = (state - env.observation_space.low) / discrete_os_win_size
        return tuple(discrete_state.astype(int))  # np.int is deprecated, use the builtin int

    discrete_state = get_discrete_state(env.reset())

    SOE = []

    for episode in range(episodes):
        if episode % 5000 == 0:
            print(episode)

        discrete_state = get_discrete_state(env.reset())
        done = False
        while not done:
            action = np.argmax(q_table[discrete_state])
            new_state, reward, done, _ = env.step(action)

            new_discrete_state = get_discrete_state(new_state)

            if not done:
                max_future_q = np.max(q_table[new_discrete_state])
                current_q = q_table[discrete_state + (action,)]
                new_q = (1 - learning_rate) * current_q + learning_rate * (reward + discount * max_future_q)
                q_table[discrete_state + (action,)] = new_q

            discrete_state = new_discrete_state

            SOE.append(new_state)

            print(reward, new_state)
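
    One more thing worth noting: the loop above always takes np.argmax over a randomly initialised table, so it never explores. A common addition (a sketch, with epsilon and its decay schedule chosen arbitrarily here) is epsilon-greedy action selection:

    # Sketch: epsilon-greedy action selection instead of pure argmax.
    # The starting epsilon and the decay schedule are arbitrary choices.
    epsilon = 1.0
    start_epsilon_decaying = 1
    end_epsilon_decaying = episodes // 2
    epsilon_decay_value = epsilon / (end_epsilon_decaying - start_epsilon_decaying)

    # inside the while-loop, replace the argmax line with:
    if np.random.random() > epsilon:
        action = np.argmax(q_table[discrete_state])         # exploit the current estimates
    else:
        action = np.random.randint(0, env.action_space.n)   # explore a random action

    # and after each episode, decay epsilon:
    if end_epsilon_decaying >= episode >= start_epsilon_decaying:
        epsilon -= epsilon_decay_value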