Project Notebook - experiments with three Q-learning algorithms

Import libraries

In [49]:
%matplotlib 
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb
import itertools
Using matplotlib backend: MacOSX

Define R Matrix

In [152]:
# set grid size
grid_length = 12 
grid_height = 6

R_matrix = np.full((grid_height, grid_length),-1) # every other step receives -1 reward 
R_matrix[-1, 1:grid_length-1] = -100 # cliff 
R_matrix[grid_height-2:-1, grid_length-8:grid_length-4] = -100
R_matrix[grid_height-3:grid_height-2, grid_length-7:grid_length-5] = -100
R_matrix[grid_height-4:grid_height-3, grid_length-3:grid_length] = -100
R_matrix[grid_height-5:grid_height-4, grid_length-9:grid_length-8] = -100
R_matrix[-1, -1] = 100 #objective
R_matrix
Out[152]:
array([[  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,
          -1],
       [  -1,   -1,   -1, -100,   -1,   -1,   -1,   -1,   -1,   -1,   -1,
          -1],
       [  -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1,   -1, -100, -100,
        -100],
       [  -1,   -1,   -1,   -1,   -1, -100, -100,   -1,   -1,   -1,   -1,
          -1],
       [  -1,   -1,   -1,   -1, -100, -100, -100, -100,   -1,   -1,   -1,
          -1],
       [  -1, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100,
         100]])

Set-up Cliff environment

Set function for renderer

In [153]:
#for the renderer
def change_range(values, vmin=0, vmax=1):
    start_zero = values - np.min(values)
    return (start_zero / (np.max(start_zero) + 1e-7)) * (vmax - vmin) + vmin
In [154]:
class CliffEnvironment:
    #change these colors
    terrain_color = dict(normal=[127/360, 0, 96/100],
                         objective=[26/360, 100/100, 100/100],
                         cliff=[247/360, 92/100, 70/100],
                         player=[344/360, 93/100, 100/100])
    
    
    def __init__(self):
        self.player = None
        self._create_grid()  
        self._draw_grid()
        
    def _create_grid(self, initial_grid=None):
        self.grid = self.terrain_color['normal'] * np.ones((grid_height, grid_length, 3))
        self._set_terrain(self.grid)
        
    def _set_terrain(self, grid):
        grid[-1, 1:grid_length-1] = self.terrain_color['cliff']
        grid[grid_height-2:-1, grid_length-8:grid_length-4] = self.terrain_color['cliff']
        grid[grid_height-3:grid_height-2, grid_length-7:grid_length-5] = self.terrain_color['cliff']
        grid[grid_height-4:grid_height-3, grid_length-3:grid_length] = self.terrain_color['cliff']
        grid[grid_height-5:grid_height-4, grid_length-9:grid_length-8] = self.terrain_color['cliff']
        grid[-1, -1] = self.terrain_color['objective']
        
    def _draw_grid(self):
        self.fig, self.ax = plt.subplots(figsize=(grid_length, grid_height))
        self.ax.grid(which='minor')       
        self.q_texts = [self.ax.text(*self._id_to_position(i)[::-1], '0',
                                     fontsize=11, verticalalignment='center', 
                                     horizontalalignment='center') for i in range(grid_length * grid_height)]     
         
        self.im = self.ax.imshow(hsv_to_rgb(self.grid), cmap='terrain',
                                 interpolation='nearest', vmin=0, vmax=1)        
        self.ax.set_xticks(np.arange(grid_length))
        self.ax.set_xticks(np.arange(grid_length) - 0.5, minor=True)
        self.ax.set_yticks(np.arange(grid_height))
        self.ax.set_yticks(np.arange(grid_height) - 0.5, minor=True)
        
    def reset(self):
        
        self.player = (grid_height-1, 0) # (y_coord, x,_coord, tuple to indicate current location )       
        return self._position_to_id(self.player)
    
    def step(self, action):
        # SETTING GRID MOVEMENT RULES AND CONSTRAINTS 
        #move UP
        # if action is move UP and the Y_cord is > 0 (i.e. agent is not in the top row of grid)
        if action == 0 and self.player[0] > 0:
            #move the agent one cell up
            self.player = (self.player[0] - 1, self.player[1])
        
        #move DOWN
        # if action is move DOWN and the Y_cord is < last row (i.e. agent is not in the bottom row of grid)
        if action == 1 and self.player[0] < grid_height-1:
            #move the agent one cell down
            self.player = (self.player[0] + 1, self.player[1])
        
        #move RIGHT
        # if action is move RIGHT and the X_cord is < last col (i.e. agent is not in the...
        # furthest right hand column of grid)
        if action == 2 and self.player[1] < grid_length-1:
            #move the agent one cell to the right 
            self.player = (self.player[0], self.player[1] + 1)
        
        #move LEFT
        # if action is move LEFT and the X_cord is > first col (i.e. agent is not in the...
        # furthest left hand column of grid)
        if action == 3 and self.player[1] > 0:
            #move the agent one cell to the left 
            self.player = (self.player[0], self.player[1] - 1)
        
        #Assign reward for chosen action
        #reward = R_matrix[self.player[1],self.player[1]]
        
        #
        if all(self.grid[self.player] == self.terrain_color['cliff']) or all(self.grid[self.player] == self.terrain_color['objective']):
            reward = R_matrix[self.player[0],self.player[1]]
            terminal_state = True
        else:
            reward = R_matrix[self.player[0],self.player[1]]
            terminal_state = False
            
        if all(self.grid[self.player] == self.terrain_color['objective']):
            #reward = R_matrix[self.player[0],self.player[1]]
            goal_state = True
        
        
        return self._position_to_id(self.player), reward, terminal_state

    
    def _position_to_id(self, pos):
        ''' Maps a position in x,y coordinates to a unique ID '''
        return pos[0] * grid_length + pos[1]
    
    def _id_to_position(self, idx):
        return (idx // grid_length), (idx % grid_length)
        
    def render(self, Q_matrix=None, action=None, max_q=False, colorize_q=False):
        assert self.player is not None, 'You first need to call .reset()'  
        
        if colorize_q:
            assert Q_matrix is not None, 'Q_matrix must not be None for using colorize_q'            
            grid = self.terrain_color['normal'] * np.ones((grid_height, grid_length, 3))
            values = change_range(np.max(Q_matrix, -1)).reshape(grid_height, grid_length)
            grid[:, :, 1] = values
            self._set_terrain(grid)
        else:            
            grid = self.grid.copy()
            
        grid[self.player] = self.terrain_color['player']       
        self.im.set_data(hsv_to_rgb(grid))
               
        if Q_matrix is not None:
            xs = np.repeat(np.arange(grid_length), grid_height)
            ys = np.tile(np.arange(grid_height), grid_length)  
            
            for i, text in enumerate(self.q_texts):
                if max_q:
                    q = max(Q_matrix[i])    
                    txt = '{:.2f}'.format(q)
                    text.set_text(txt)
                else:                
                    actions = ['U', 'D', 'R', 'L']
                    txt = '\n'.join(['{}: {:.2f}'.format(k, q) for k, q in zip(actions, Q_matrix[i])])
                    text.set_text(txt)
                
        if action is not None:
            self.ax.set_title(action, color='r', weight='bold', fontsize=32)

        plt.pause(0.01)
        #plt.show()
        #plt.draw()

Initialise parameters and environment

In [155]:
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
actions = ['UP', 'DOWN', 'RIGHT', 'LEFT']
cliff = CliffEnvironment()
In [156]:
# The number of states in simply the number of "squares" in our grid world, in this case 4 * 12
num_states = grid_length * grid_height
# We have 4 possible actions, up, down, right and left
num_actions = 4

#Initialise the Q-matrix with an array of zeros. 
Q_matrix = np.zeros((num_states, num_actions))

df = pd.DataFrame(Q_matrix, columns=['A0: up', 'A1: down', 'A2: right', 'A3: left'])
df.index.name = 'States'
df.head(5)
Out[156]:
A0: up A1: down A2: right A3: left
States
0 0.0 0.0 0.0 0.0
1 0.0 0.0 0.0 0.0
2 0.0 0.0 0.0 0.0
3 0.0 0.0 0.0 0.0
4 0.0 0.0 0.0 0.0

Define an initial e-greedy policy

In [157]:
def egreedy_policy(Q_matrix, state, epsilon=0.9):
    ''' 
    Choose an action based on a epsilon greedy policy.    
    A random action is selected with epsilon probability, else select the best action.    
    '''
    if np.random.random() < epsilon:
        return np.random.choice(4)
    else:
        return np.argmax(Q_matrix[state])

Define our traditional q-learning algorithm (Watkins & Dayan, 2002)

In [158]:
def q_learning(cliff, num_episodes=5000, render=False, decay = True, epsilon = 0.9,
               epsilon_threshold = 0.75, decay1=0.999, decay2=0.9999, alpha_lr =0.1, gamma=0.9):    
    '''
    Based on 1992 Watkins and Dayan technical note on 'Q-learning'
    http://www.gatsby.ucl.ac.uk/~dayan/papers/cjch.pdf
    
    '''
    #Initialise Q(s,a) matrix 
    Q_matrix = np.zeros((num_states, num_actions))
    rewards_list = []
    #max_reward = 100-grid_length 
    max_reward = 84
    first_max_threshold = 0 
    episodes_to_max = 99999
    
    
    for episode in range(num_episodes):
        
        state = cliff.reset() #initialise state St
        terminal_state = False
        reward_sum = 0
        

        while not terminal_state:            
            # Choose action (At) from state (St) using egreedy_policy       
            action = egreedy_policy(Q_matrix, state, epsilon)
                       
            # Execute the action (At) to the environment 
            #and observe new state (s at t+1)
            
            next_state, reward, terminal_state = cliff.step(action)
            #Receive immediate reward (r at t+1)
            reward_sum += reward
            
            # Update Q_matrix using Watkins and Dayan 1992 update rule
            td_target = reward + gamma * np.max(Q_matrix[next_state])
            td_error = td_target - Q_matrix[state][action]
            Q_matrix[state][action] += alpha_lr * td_error
            state = next_state
            
            
            if decay: 
                if epsilon >= epsilon_threshold: 
                    epsilon =  decay1 * epsilon
                else: 
                    epsilon =  decay2 * epsilon            

            #x = input("press space")
            #if render:
            #    cliff.render(Q_matrix, action=actions[action], colorize_q=True)
               
            
        rewards_list.append(reward_sum)
        
        if first_max_threshold < 1 and reward_sum == 84:
            print('Found optimal path after only: {} episodes'.format(episode))
            episodes_to_max = episode
            first_max_threshold += 1
        
        if episode % 1000 == 0: 
            print('Episode: {} completed'.format(episode))
    
    return rewards_list, Q_matrix, episodes_to_max

Define the SA_Q_Learning algorithm (Guo et al 2004)

In [159]:
def sa_q_learning(cliff, num_episodes=5000, render=False, decay = True, epsilon = 0.9, 
                  epsilon_threshold = 0.5, decay1=0.999, decay2=0.9999, alpha_lr =0.1, gamma=0.9):    
    '''
    Based on 2004 paper from Guo et al: A new Q-learning algorithm based on the metropolis criterion
    https://ieeexplore.ieee.org/document/1335509
    
    '''
    
    #Initialise Q(s,a) matrix 
    Q_matrix = np.zeros((num_states, num_actions))
    rewards_list = []
    #max_reward = 100-grid_length
    max_reward = 84
    first_max_threshold = 0 
    episodes_to_max = 99999
    
    
    for episode in range(num_episodes):
        
        state = cliff.reset() #initialise state St
        terminal_state = False
        reward_sum = 0

        while not terminal_state:            
            # Choose action (At) from state (St) using egreedy_policy       
            action_policy = egreedy_policy(Q_matrix, state, epsilon)
            
            #select an action arbitrarily
            action_r = np.random.choice(4)
            
            #extract the Q values for both the policy action and the 
            # arbitrary action_r
            Q_p = Q_matrix[state][action_policy]
            Q_r = Q_matrix[state][action_r]
            
            #generate a random value ξ∈(0,1) 
            ξ = np.random.random()
           
            if ξ < np.exp((Q_r - Q_p)/epsilon):
                # Execute the action (At) to the environment 
                #and observe new state (s at t+1)
                next_state, reward, terminal_state = cliff.step(action_r)
                #Receive immediate reward (r at t+1)
                reward_sum += reward

                # Update Q_matrix using Watkins and Dayan 1992 update rule
                td_target = reward + gamma * np.max(Q_matrix[next_state])
                td_error = td_target - Q_matrix[state][action_r]
                Q_matrix[state][action_r] += alpha_lr * td_error
                
            else: 
                next_state, reward, terminal_state = cliff.step(action_policy)
                
                #Receive immediate reward (r at t+1)
                reward_sum += reward

                # Update Q_matrix using Watkins and Dayan 1992 update rule
                td_target = reward + gamma * np.max(Q_matrix[next_state])
                td_error = td_target - Q_matrix[state][action_policy]
                Q_matrix[state][action_policy] += alpha_lr * td_error
            
            state = next_state
            
            if decay: 
                if epsilon >= epsilon_threshold: 
                    epsilon =  decay1 * epsilon
                else: 
                    epsilon =  decay2 * epsilon

            if render:
                cliff.render(Q_matrix, action=actions[action], colorize_q=True)
            
        rewards_list.append(reward_sum)
        
        if first_max_threshold < 1 and reward_sum == 84:
            print('Found optimal path after only: {} episodes'.format(episode))
            episodes_to_max = episode
            first_max_threshold += 1
        
        if episode % 1000 == 0: 
            print('Episode: {} completed'.format(episode))
    
    return rewards_list, Q_matrix, episodes_to_max

Define the Backwards Q-learning based Sarsa algorithm BQSA (Hao Wang et al, 2013)

In [160]:
def bqsa_learning(cliff, num_episodes=5000, render=False, decay = True, epsilon = 0.9, 
                  epsilon_threshold = 0.8, decay1=0.999, decay2=0.9999, alpha_lr =0.1, gamma=0.9):    
    '''
    Backward Q based SARSA algorithm 
    based on paper by Hao Wang et al 2013 
    https://www.sciencedirect.com/science/article/abs/pii/S0952197613001176
    
    
    '''
    
    #Initialise Q(s,a) matrix 
    Q_matrix = np.zeros((num_states, num_actions))
    rewards_list = []
    #max_reward = 100-grid_length 
    max_reward = 84
    first_max_threshold = 0 
    episodes_to_max = 99999
    
    for episode in range(num_episodes):
        
        state = cliff.reset() #initialise state St
        
        # Choose action (At) from state (St) using egreedy_policy       
        action = egreedy_policy(Q_matrix, state, epsilon)
        
        terminal_state = False
        goal_state = False
        reward_sum = 0
        
        M = []
        N = 0

        while not terminal_state and N < 200:  

            # Execute the action (At) to the environment 
            #and observe new state (s at t+1)
            next_state, reward, terminal_state = cliff.step(action)

            #Receive immediate reward (r at t+1)
            reward_sum += reward
            
            # Choose next action
            next_action = egreedy_policy(Q_matrix, next_state, epsilon)
            # Next q value is the value of the next action
            
            M_i = {"i": N, 
                "st": state, 
                "at": action, 
                "rt+1": reward,
                "st+1": next_state}
            
            M.append(M_i)
            #print(M)
            # Next q value is the value of the next action
            #td_target = reward + gamma * np.max(Q_matrix[next_state][action])
            #td_error = td_target - Q_matrix[state][action]
            update = alpha_lr * (reward + (gamma * Q_matrix[next_state][next_action]) - Q_matrix[state][action])
            # Update q value
            Q_matrix[state][action] += update
            #td_target = reward + gamma * np.max(Q_matrix[next_state])
            #td_error = td_target - Q_matrix[state][action]
            #Q_matrix[state][action] += alpha_lr * td_error
            #state = next_state

            # Update state and action        
            state = next_state
            action = next_action
            
            if decay: 
                if epsilon >= epsilon_threshold: 
                    epsilon =  decay1 * epsilon
                else: 
                    epsilon =  decay2 * epsilon            

            #if render:
                #cliff.render(Q_matrix, action=actions[action], colorize_q=True)
                
            N += 1
            
        rewards_list.append(reward_sum)
        
            
        if goal_state:
            for x in reversed(range(N)):
                update = alpha_lr * (reward + (gamma * np.max(Q_matrix[M[x]["st+1"]][M[x]["at"]])) - Q_matrix[M[x]["st"]][M[x]["at"]]) 
                Q_matrix[M[x]["st"]][M[x]["at"]] += update  
            
            
        
        if first_max_threshold < 1 and reward_sum == 84:
            print('Found optimal path after only: {} episodes'.format(episode))
            episodes_to_max = episode
            first_max_threshold += 1
        
        if episode % 1000 == 0: 
            print('Episode: {} completed'.format(episode))
            
        #if decay: 
               # if epsilon >= epsilon_threshold: 
                  #  epsilon =  decay1 * epsilon
                #else: 
                    #epsilon =  decay2 * epsilon
    
    return rewards_list, Q_matrix, episodes_to_max, M

Define a parse_results_list() utility function to be called within the run_Q main function

In [161]:
%matplotlib inline

def parse_results_list(results_list): 
    
    
    # from list of dicts to PD dataframe
    parsed_results_df = pd.DataFrame(results_list, index=None)
    
    #print top 5 by episodes to optimal solution
    print("Top 5 results, by episodes to find optimal path: \n", 
          parsed_results_df.sort_values('Epi_to_max', ascending = True).head())
    
    #print top 5 by mean reward over 5000 episodes 
    print("\n \n Top 5 results, by mean reward over 5000 episodes: \n",
          parsed_results_df.sort_values('Mean reward', ascending = False).head())

    #filter out experiments that did not find optimal path
    completed_parsed_results_df = parsed_results_df[parsed_results_df.Epi_to_max < 999]
    
    #plot results
    
    if len(completed_parsed_results_df) != 0:
        completed_parsed_results_df.plot.scatter(x='Epi_to_max', y='Mean reward')
        return completed_parsed_results_df
    else:
         print("No experiments in this batch made it to the end of the game .")
    
    

Q-LEARNING: grid-search to find optimal parameters.

In [147]:
def run_Q (gamma_param = [0.05,0.1,0.2,0.3,0.4], 
           alpha_lr_param = [0.05,0.1,0.2,0.3,0.4], 
           decay_param = [True],
           decay1 = [0.999],
           decay2 = [0.9999],
           verbose = True, 
           plot_figures = True,
           parse_results = True
          ):
    
    max_experiments = len(gamma_param)*len(alpha_lr_param)*len(decay_param)*len(decay1)*len(decay2) 
    results_list = []
    experiment_id = 1

    for d in decay1:
        for e in decay2: 
            for i in gamma_param:
                for j in alpha_lr_param: 
                    for k in decay_param: 
                        q_learning_rewards, Q, episodes_to_max = zip(*[q_learning(cliff, decay = k, decay1=d,decay2=e,gamma= i, render=True,
                                                         alpha_lr= j ) for _ in range(1)])
                        avg_rewards = np.mean(q_learning_rewards, axis=0)
                        mean_reward = [np.mean(avg_rewards)] * len(avg_rewards)
                        #max_reward = np.max(q_learning_rewards, axis=0)

                        if plot_figures:
                            fig, ax = plt.subplots()
                            ax.set_xlabel('Episodes')
                            ax.set_ylabel('Rewards')
                            ax.set_title('Experiment ID: {} \n Params: gamma = {}, alpha_lr = {}, decay = {} \n Mean Reward: {} \n Episodes to find optimal path: {}'.format(experiment_id, i, j, k, mean_reward[0],"Not found" if min(episodes_to_max) == 99999 else min(episodes_to_max) ))

                            rolling_avg_rewards = pd.Series(avg_rewards).rolling(20, min_periods=20).mean()


                            ax.plot(rolling_avg_rewards, color='green')

                            ax.plot(mean_reward, 'g--')

                            if min(episodes_to_max) != 99999: 
                                ax.axvline(x=min(episodes_to_max), color='r', linestyle='--')

                            plt.tight_layout()
                            fig.savefig('{}_results_q.png'.format(experiment_id), pad_inches=1)

                        results = {'Experiment_ID': experiment_id,
                                   'gamma': i,
                                   'alpha_lr': j,
                                   'decay': k,
                                   'decay1':d,
                                   'decay2':e,
                                   #'Episode Length':steps,
                                   'Mean reward': mean_reward[0],
                                   'Epi_to_max' : min(episodes_to_max)
                                  }

                        results_list.append(results)

                        if verbose: 
                            print ('Experiment {} of {} complete'.format(experiment_id, max_experiments))
                        experiment_id += 1
            
    if parse_results: 
        parsed_results_df = parse_results_list(results_list)
        return parsed_results_df

    else: 
        return results_list
    
Q_results = run_Q()
Episode: 0 completed
Found optimal path after only: 931 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 1 of 25 complete
Episode: 0 completed
Found optimal path after only: 971 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 2 of 25 complete
Episode: 0 completed
Found optimal path after only: 781 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 3 of 25 complete
Episode: 0 completed
Found optimal path after only: 630 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 4 of 25 complete
Episode: 0 completed
Found optimal path after only: 620 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 5 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Found optimal path after only: 1196 episodes
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 6 of 25 complete
Episode: 0 completed
Found optimal path after only: 901 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 7 of 25 complete
Episode: 0 completed
Found optimal path after only: 669 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 8 of 25 complete
Episode: 0 completed
Found optimal path after only: 617 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 9 of 25 complete
Episode: 0 completed
Found optimal path after only: 551 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 10 of 25 complete
Episode: 0 completed
Found optimal path after only: 924 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 11 of 25 complete
Episode: 0 completed
Found optimal path after only: 783 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 12 of 25 complete
Episode: 0 completed
Found optimal path after only: 536 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 13 of 25 complete
Episode: 0 completed
Found optimal path after only: 558 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 14 of 25 complete
Episode: 0 completed
Found optimal path after only: 537 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 15 of 25 complete
Episode: 0 completed
Found optimal path after only: 936 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 16 of 25 complete
Episode: 0 completed
Found optimal path after only: 729 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 17 of 25 complete
Episode: 0 completed
Found optimal path after only: 598 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 18 of 25 complete
Episode: 0 completed
Found optimal path after only: 496 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 19 of 25 complete
Episode: 0 completed
Found optimal path after only: 375 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 20 of 25 complete
Episode: 0 completed
Found optimal path after only: 900 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
/Users/JamesPaulPhelan/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py:524: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Experiment 21 of 25 complete
Episode: 0 completed
Found optimal path after only: 690 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 22 of 25 complete
Episode: 0 completed
Found optimal path after only: 573 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 23 of 25 complete
Episode: 0 completed
Found optimal path after only: 486 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 24 of 25 complete
Episode: 0 completed
Found optimal path after only: 500 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 25 of 25 complete
Top 5 results, by episodes to find optimal path: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay  decay1  decay2  \
19         375             20      48.4658       0.4   True   0.999  0.9999   
23         486             24      45.8934       0.3   True   0.999  0.9999   
18         496             19      48.3994       0.3   True   0.999  0.9999   
24         500             25      46.9630       0.4   True   0.999  0.9999   
12         536             13      48.1236       0.2   True   0.999  0.9999   

    gamma  
19    0.3  
23    0.4  
18    0.3  
24    0.4  
12    0.2  

 
 Top 5 results, by mean reward over 5000 episodes: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay  decay1  decay2  \
7          669              8      52.3890      0.20   True   0.999  0.9999   
11         783             12      52.2286      0.10   True   0.999  0.9999   
16         729             17      52.1730      0.10   True   0.999  0.9999   
20         900             21      51.3020      0.05   True   0.999  0.9999   
3          630              4      51.0756      0.30   True   0.999  0.9999   

    gamma  
7    0.10  
11   0.20  
16   0.30  
20   0.40  
3    0.05  
In [194]:
Q_results['Algorithm'] = 'Q-Learning'
Q_results.to_csv('/Users/JamesPaulPhelan/Desktop/Q_results', sep='\t')
Q_results
Out[194]:
Epi_to_max Experiment_ID Mean reward alpha_lr decay decay1 decay2 gamma Algorithm
0 931 1 44.6274 0.05 True 0.999 0.9999 0.05 Q-Learning
1 971 2 48.2316 0.10 True 0.999 0.9999 0.05 Q-Learning
2 781 3 49.9084 0.20 True 0.999 0.9999 0.05 Q-Learning
3 630 4 51.0756 0.30 True 0.999 0.9999 0.05 Q-Learning
4 620 5 47.4086 0.40 True 0.999 0.9999 0.05 Q-Learning
6 901 7 50.4472 0.10 True 0.999 0.9999 0.10 Q-Learning
7 669 8 52.3890 0.20 True 0.999 0.9999 0.10 Q-Learning
8 617 9 47.6996 0.30 True 0.999 0.9999 0.10 Q-Learning
9 551 10 48.8646 0.40 True 0.999 0.9999 0.10 Q-Learning
10 924 11 50.2824 0.05 True 0.999 0.9999 0.20 Q-Learning
11 783 12 52.2286 0.10 True 0.999 0.9999 0.20 Q-Learning
12 536 13 48.1236 0.20 True 0.999 0.9999 0.20 Q-Learning
13 558 14 47.7614 0.30 True 0.999 0.9999 0.20 Q-Learning
14 537 15 46.4736 0.40 True 0.999 0.9999 0.20 Q-Learning
15 936 16 50.1452 0.05 True 0.999 0.9999 0.30 Q-Learning
16 729 17 52.1730 0.10 True 0.999 0.9999 0.30 Q-Learning
17 598 18 46.6736 0.20 True 0.999 0.9999 0.30 Q-Learning
18 496 19 48.3994 0.30 True 0.999 0.9999 0.30 Q-Learning
19 375 20 48.4658 0.40 True 0.999 0.9999 0.30 Q-Learning
20 900 21 51.3020 0.05 True 0.999 0.9999 0.40 Q-Learning
21 690 22 49.8238 0.10 True 0.999 0.9999 0.40 Q-Learning
22 573 23 48.3804 0.20 True 0.999 0.9999 0.40 Q-Learning
23 486 24 45.8934 0.30 True 0.999 0.9999 0.40 Q-Learning
24 500 25 46.9630 0.40 True 0.999 0.9999 0.40 Q-Learning
In [173]:
import seaborn as sns

data1 = Q_results.pivot("gamma", "alpha_lr", "Mean reward")
ax = sns.heatmap(data1,annot=True,linewidths=.5,cbar_kws={'label': 'Mean Reward'})
fig = ax.get_figure()
fig.savefig('QMR.png') 
In [174]:
data2 = Q_results.pivot("gamma", "alpha_lr", "Epi_to_max")
ax = sns.heatmap(data2,linewidths=.5,cbar_kws={'label': 'Episodes to Max'})
fig = ax.get_figure()
fig.savefig('QETM.png') 
In [143]:
def run_SA_Q (gamma_param = [0.7,0.75,0.8,0.85,0.9],
              alpha_lr_param = [0.09,0.095,0.1,0.15,0.2],
              decay_param = [True],
              decay1 = [0.999],
              decay2 = [0.9999],
              verbose = True,
              plot_figures = True,
              parse_results = True
             ):
    
    max_experiments = len(gamma_param)*len(alpha_lr_param)
    results_list = []
    experiment_id = 1

    for d in decay1:
        for e in decay2:
            for i in gamma_param:
                for j in alpha_lr_param: 
                    rewards_list, Q_matrix, episodes_to_max = zip(*[sa_q_learning(cliff, num_episodes=5000, render=False, epsilon = 0.9, 
                                    epsilon_threshold = 0.8, decay1=d, decay2=e, alpha_lr =j, gamma=i) for _ in range(1)])
                    avg_rewards = np.mean(rewards_list, axis=0)
                    mean_reward = [np.mean(avg_rewards)] * len(avg_rewards)
                    #max_reward = np.max(q_learning_rewards, axis=0)

                    if plot_figures:
                        fig, ax = plt.subplots()
                        ax.set_xlabel('Episodes')
                        ax.set_ylabel('Rewards')
                        ax.set_title('Experiment ID: {} \n Params: gamma = {}, alpha_lr = {} \n Mean Reward: {} \n Episodes to find optimal path: {}'.format(experiment_id, i, j, mean_reward[0],"Not found" if min(episodes_to_max) == 99999 else min(episodes_to_max) ))

                        rolling_avg_rewards = pd.Series(avg_rewards).rolling(20, min_periods=20).mean()


                        ax.plot(rolling_avg_rewards)

                        ax.plot(mean_reward, 'g--')

                        if min(episodes_to_max) != 99999: 
                            ax.axvline(x=min(episodes_to_max), color='r', linestyle='--')

                        plt.tight_layout()
                        fig.savefig('{}_results_sa_q.jpg'.format(experiment_id), pad_inches=1)

                    results = {'Experiment_ID': experiment_id,
                               'gamma': i,
                               'alpha_lr': j,
                               'decay1':d,
                               'decay2':e,
                               'Mean reward': mean_reward[0],
                               'Epi_to_max' : min(episodes_to_max)
                              }

                    results_list.append(results)

                    if verbose: 
                        print ('Experiment {} of {} complete'.format(experiment_id, max_experiments))
                    experiment_id += 1

    if parse_results: 
        parsed_results_df = parse_results_list(results_list)
        return parsed_results_df

    else: 
        return results_list

SA_Q_results = run_SA_Q()
SA_Q_results['Algorithm'] = 'SAQ-Learning'
Episode: 0 completed
Found optimal path after only: 291 episodes
/Users/JamesPaulPhelan/anaconda3/lib/python3.6/site-packages/ipykernel_launcher.py:39: RuntimeWarning: overflow encountered in exp
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 1 of 25 complete
Episode: 0 completed
Found optimal path after only: 356 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 2 of 25 complete
Episode: 0 completed
Found optimal path after only: 260 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 3 of 25 complete
Episode: 0 completed
Found optimal path after only: 233 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 4 of 25 complete
Episode: 0 completed
Found optimal path after only: 235 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 5 of 25 complete
Episode: 0 completed
Found optimal path after only: 311 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 6 of 25 complete
Episode: 0 completed
Found optimal path after only: 282 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 7 of 25 complete
Episode: 0 completed
Found optimal path after only: 308 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 8 of 25 complete
Episode: 0 completed
Found optimal path after only: 226 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 9 of 25 complete
Episode: 0 completed
Found optimal path after only: 198 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 10 of 25 complete
Episode: 0 completed
Found optimal path after only: 261 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 11 of 25 complete
Episode: 0 completed
Found optimal path after only: 229 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 12 of 25 complete
Episode: 0 completed
Found optimal path after only: 295 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 13 of 25 complete
Episode: 0 completed
Found optimal path after only: 239 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 14 of 25 complete
Episode: 0 completed
Found optimal path after only: 202 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 15 of 25 complete
Episode: 0 completed
Found optimal path after only: 247 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 16 of 25 complete
Episode: 0 completed
Found optimal path after only: 275 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 17 of 25 complete
Episode: 0 completed
Found optimal path after only: 227 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 18 of 25 complete
Episode: 0 completed
Found optimal path after only: 218 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 19 of 25 complete
Episode: 0 completed
Found optimal path after only: 191 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 20 of 25 complete
Episode: 0 completed
Found optimal path after only: 216 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
/Users/JamesPaulPhelan/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py:524: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Experiment 21 of 25 complete
Episode: 0 completed
Found optimal path after only: 248 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 22 of 25 complete
Episode: 0 completed
Found optimal path after only: 260 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 23 of 25 complete
Episode: 0 completed
Found optimal path after only: 246 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 24 of 25 complete
Episode: 0 completed
Found optimal path after only: 199 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 25 of 25 complete
Top 5 results, by episodes to find optimal path: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay1  decay2  gamma
19         191             20      72.2010      0.20   0.999  0.9999   0.85
9          198             10      70.0972      0.20   0.999  0.9999   0.75
24         199             25      69.6724      0.20   0.999  0.9999   0.90
14         202             15      68.7440      0.20   0.999  0.9999   0.80
20         216             21      71.1466      0.09   0.999  0.9999   0.90

 
 Top 5 results, by mean reward over 5000 episodes: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay1  decay2  gamma
15         247             16      72.7920     0.090   0.999  0.9999   0.85
17         227             18      72.5388     0.100   0.999  0.9999   0.85
0          291              1      72.3062     0.090   0.999  0.9999   0.70
6          282              7      72.2462     0.095   0.999  0.9999   0.75
19         191             20      72.2010     0.200   0.999  0.9999   0.85
In [193]:
SA_Q_results['Algorithm'] = 'SAQ-Learning'
SA_Q_results
SA_Q_results.to_csv('/Users/JamesPaulPhelan/Desktop/SA_Q_results', sep='\t')
In [175]:
data3 = SA_Q_results.pivot("gamma", "alpha_lr", "Mean reward")
ax = sns.heatmap(data3,annot=True,linewidths=.5,cbar_kws={'label': 'Mean Reward'})
fig = ax.get_figure()
fig.savefig('SAQMR.png') 
In [176]:
data4 = SA_Q_results.pivot("gamma", "alpha_lr", "Epi_to_max")
ax = sns.heatmap(data4,linewidths=.5,cbar_kws={'label': 'Episodes to Max'})
fig = ax.get_figure()
fig.savefig('SAQETM.png') 

run bqsa

In [166]:
def run_bqsa (gamma_param = [0.09,0.095,0.1,0.15,0.2],
              alpha_lr_param = [0.09,0.095,0.1,0.15,0.2],
              decay_param = [True],
              decay1 = [0.999],
              decay2 = [0.999],
              verbose = True,
              plot_figures = True,
              parse_results = True,
              render1 = False
              ):
    
    max_experiments = len(gamma_param)*len(alpha_lr_param)
    results_list = []
    experiment_id = 1

    for d in decay1:
        for e in decay2:
            
            for i in gamma_param:
                for j in alpha_lr_param: 
                    rewards_list, Q_matrix, episodes_to_max, M = zip(*[bqsa_learning(cliff, alpha_lr =j, gamma=i,decay1=d, decay2=e, epsilon = 0.9, 
                                            epsilon_threshold = 0.8) for _ in range(1)])
                    avg_rewards = np.mean(rewards_list, axis=0)
                    mean_reward = [np.mean(avg_rewards)] * len(avg_rewards)
                    #max_reward = np.max(q_learning_rewards, axis=0)

                    if plot_figures:
                        fig, ax = plt.subplots()
                        ax.set_xlabel('Episodes')
                        ax.set_ylabel('Rewards')
                        ax.set_title('Experiment ID: {} \n Params: gamma = {}, alpha_lr = {} \n Mean Reward: {} \n Episodes to find optimal path: {}'.format(experiment_id, i, j, mean_reward[0],"Not found" if min(episodes_to_max) == 99999 else min(episodes_to_max) ))
                        rolling_avg_rewards = pd.Series(avg_rewards).rolling(20, min_periods=20).mean()
                        ax.plot(rolling_avg_rewards, color='orange')
                        ax.plot(mean_reward, 'g--')
                        if min(episodes_to_max) != 99999: 
                            ax.axvline(x=min(episodes_to_max), color='r', linestyle='--')
                        plt.tight_layout()
                        plt.subplots_adjust(top=0.88)
                        fig.savefig('{}_results_bqsa.jpg'.format(experiment_id), pad_inches=1)

                    results = {'Experiment_ID': experiment_id,
                               'gamma': i,
                               'alpha_lr': j,
                               'decay1':d,
                               'decay2':e,
                               'Mean reward': mean_reward[0],
                               'Epi_to_max' : min(episodes_to_max)
                              }

                    results_list.append(results)

                    if verbose: 
                        print ('Experiment {} of {} complete'.format(experiment_id, max_experiments))
                    experiment_id += 1

    if parse_results: 
        parsed_results_df = parse_results_list(results_list)
        return parsed_results_df

    else: 
        return results_list

bqsa_results = run_bqsa()
bqsa_results['Algorithm'] = 'BQSA-Learning'
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 1 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 2 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 3 of 25 complete
Episode: 0 completed
Found optimal path after only: 282 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 4 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 5 of 25 complete
Episode: 0 completed
Found optimal path after only: 357 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 6 of 25 complete
Episode: 0 completed
Found optimal path after only: 325 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 7 of 25 complete
Episode: 0 completed
Found optimal path after only: 321 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 8 of 25 complete
Episode: 0 completed
Found optimal path after only: 234 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 9 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 10 of 25 complete
Episode: 0 completed
Found optimal path after only: 297 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 11 of 25 complete
Episode: 0 completed
Found optimal path after only: 315 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 12 of 25 complete
Episode: 0 completed
Found optimal path after only: 313 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 13 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 14 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 15 of 25 complete
Episode: 0 completed
Found optimal path after only: 233 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 16 of 25 complete
Episode: 0 completed
Found optimal path after only: 240 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 17 of 25 complete
Episode: 0 completed
Found optimal path after only: 272 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 18 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 19 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 20 of 25 complete
Episode: 0 completed
Found optimal path after only: 292 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
/Users/JamesPaulPhelan/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py:524: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Experiment 21 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 22 of 25 complete
Episode: 0 completed
Found optimal path after only: 239 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 23 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 24 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 25 of 25 complete
Top 5 results, by episodes to find optimal path: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay1  decay2  gamma
15         233             16      76.0560     0.090   0.999   0.999  0.150
8          234              9      78.0350     0.150   0.999   0.999  0.095
22         239             23      77.4404     0.100   0.999   0.999  0.200
16         240             17      76.5356     0.095   0.999   0.999  0.150
17         272             18      76.8526     0.100   0.999   0.999  0.150

 
 Top 5 results, by mean reward over 5000 episodes: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay1  decay2  gamma
8          234              9      78.0350      0.15   0.999   0.999  0.095
3          282              4      78.0046      0.15   0.999   0.999  0.090
22         239             23      77.4404      0.10   0.999   0.999  0.200
20         292             21      76.9804      0.09   0.999   0.999  0.200
17         272             18      76.8526      0.10   0.999   0.999  0.150
In [168]:
data5 = bqsa_results.pivot("gamma", "alpha_lr", "Mean reward")
ax = sns.heatmap(data5,annot=True,linewidths=.5,cbar_kws={'label': 'Mean Reward'})
In [169]:
data6 = bqsa_results.pivot("gamma", "alpha_lr", "Epi_to_max")
ax = sns.heatmap(data6,annot=False,linewidths=.5,cbar_kws={'label': 'Episodes to Max'})

Comparison of algorithms and performance

In [170]:
fig, ax = plt.subplots()
ax.set_xlabel('Episodes to max')
ax.set_ylabel('Max rewards')
ax.set_title('Comparison betweeen performance of Q-Learning, SAQ-Learning and BQSA-Learning')

ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
ax.plot(Q_results.Epi_to_max, Q_results['Mean reward'], marker='o', linestyle='', ms=12, label='Q-Learning')
ax.plot(SA_Q_results.Epi_to_max, SA_Q_results['Mean reward'], marker='o', linestyle='', ms=12, label='SAQ-Learning')
ax.plot(bqsa_results.Epi_to_max, bqsa_results['Mean reward'], marker='o', linestyle='', ms=12, label='BQSA-Learning')

ax.legend()

plt.show()
In [187]:
def run_bqsa (gamma_param = [0.09,0.095,0.1,0.15,0.2],
              alpha_lr_param = [0.09,0.095,0.1,0.15,0.2],
              decay_param = [True],
              decay1 = [0.999],
              decay2 = [0.999],
              verbose = True,
              plot_figures = True,
              parse_results = True,
              render1 = False
              ):
    
    max_experiments = len(gamma_param)*len(alpha_lr_param)
    results_list = []
    experiment_id = 1

    for d in decay1:
        for e in decay2:
            
            for i in gamma_param:
                for j in alpha_lr_param: 
                    rewards_list, Q_matrix, episodes_to_max, M = zip(*[bqsa_learning(cliff, alpha_lr =j, gamma=i,decay1=d, decay2=e, epsilon = 0.9, 
                                            epsilon_threshold = 0.8) for _ in range(1)])
                    avg_rewards = np.mean(rewards_list, axis=0)
                    mean_reward = [np.mean(avg_rewards)] * len(avg_rewards)
                    #max_reward = np.max(q_learning_rewards, axis=0)

                    if plot_figures:
                        fig, ax = plt.subplots()
                        ax.set_xlabel('Episodes')
                        ax.set_ylabel('Rewards')
                        ax.set_title('Experiment ID: {} \n Params: gamma = {}, alpha_lr = {} \n Mean Reward: {} \n Episodes to find optimal path: {}'.format(experiment_id, i, j, mean_reward[0],"Not found" if min(episodes_to_max) == 99999 else min(episodes_to_max) ))
                        rolling_avg_rewards = pd.Series(avg_rewards).rolling(20, min_periods=20).mean()
                        ax.plot(rolling_avg_rewards, color='orange')
                        ax.plot(mean_reward, 'g--')
                        if min(episodes_to_max) != 99999: 
                            ax.axvline(x=min(episodes_to_max), color='r', linestyle='--')
                        plt.tight_layout()
                        #plt.subplots_adjust(top=0.88)
                        fig.savefig('{}_results_bqsa.jpg'.format(experiment_id), pad_inches=1)

                    results = {'Experiment_ID': experiment_id,
                               'gamma': i,
                               'alpha_lr': j,
                               'decay1':d,
                               'decay2':e,
                               'Mean reward': mean_reward[0],
                               'Epi_to_max' : min(episodes_to_max)
                              }

                    results_list.append(results)

                    if verbose: 
                        print ('Experiment {} of {} complete'.format(experiment_id, max_experiments))
                    experiment_id += 1

    if parse_results: 
        parsed_results_df = parse_results_list(results_list)
        return parsed_results_df

    else: 
        return results_list

bqsa_results2 = run_bqsa()
bqsa_results2['Algorithm'] = 'BQSA-Learning'
Episode: 0 completed
Found optimal path after only: 438 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 1 of 25 complete
Episode: 0 completed
Found optimal path after only: 345 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 2 of 25 complete
Episode: 0 completed
Found optimal path after only: 295 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 3 of 25 complete
Episode: 0 completed
Found optimal path after only: 202 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 4 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 5 of 25 complete
Episode: 0 completed
Found optimal path after only: 338 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 6 of 25 complete
Episode: 0 completed
Found optimal path after only: 369 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 7 of 25 complete
Episode: 0 completed
Found optimal path after only: 274 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 8 of 25 complete
Episode: 0 completed
Found optimal path after only: 230 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 9 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 10 of 25 complete
Episode: 0 completed
Found optimal path after only: 350 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 11 of 25 complete
Episode: 0 completed
Found optimal path after only: 339 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 12 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 13 of 25 complete
Episode: 0 completed
Found optimal path after only: 388 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 14 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 15 of 25 complete
Episode: 0 completed
Found optimal path after only: 295 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 16 of 25 complete
Episode: 0 completed
Found optimal path after only: 300 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 17 of 25 complete
Episode: 0 completed
Found optimal path after only: 280 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 18 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 19 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 20 of 25 complete
Episode: 0 completed
Found optimal path after only: 252 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
/Users/JamesPaulPhelan/anaconda3/lib/python3.6/site-packages/matplotlib/pyplot.py:524: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
Experiment 21 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 22 of 25 complete
Episode: 0 completed
Found optimal path after only: 211 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 23 of 25 complete
Episode: 0 completed
Found optimal path after only: 201 episodes
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 24 of 25 complete
Episode: 0 completed
Episode: 1000 completed
Episode: 2000 completed
Episode: 3000 completed
Episode: 4000 completed
Experiment 25 of 25 complete
Top 5 results, by episodes to find optimal path: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay1  decay2  gamma
23         201             24      79.0900      0.15   0.999   0.999  0.200
3          202              4      77.8998      0.15   0.999   0.999  0.090
22         211             23      76.9164      0.10   0.999   0.999  0.200
8          230              9      77.4480      0.15   0.999   0.999  0.095
20         252             21      76.8344      0.09   0.999   0.999  0.200

 
 Top 5 results, by mean reward over 5000 episodes: 
     Epi_to_max  Experiment_ID  Mean reward  alpha_lr  decay1  decay2  gamma
23         201             24      79.0900      0.15   0.999   0.999  0.200
3          202              4      77.8998      0.15   0.999   0.999  0.090
8          230              9      77.4480      0.15   0.999   0.999  0.095
9        99999             10      77.0958      0.20   0.999   0.999  0.095
22         211             23      76.9164      0.10   0.999   0.999  0.200
In [192]:
bqsa_results2
bqsa_results2.to_csv('/Users/JamesPaulPhelan/Desktop/bqsa_results2', sep='\t')
In [189]:
data7 = bqsa_results2.pivot("gamma", "alpha_lr", "Mean reward")
ax = sns.heatmap(data7,annot=True,linewidths=.5,cbar_kws={'label': 'Mean reward'})
fig = ax.get_figure()
fig.savefig('BQSAMR.png')
In [190]:
data8 = bqsa_results2.pivot("gamma", "alpha_lr", "Epi_to_max")
ax = sns.heatmap(data8,annot=False,linewidths=.5,cbar_kws={'label': 'Episodes to Max'})
fig = ax.get_figure()
fig.savefig('BQSAETM.png') 
In [191]:
fig, ax = plt.subplots()
ax.set_xlabel('Episodes to max')
ax.set_ylabel('Mean reward')
ax.set_title('Comparison betweeen performance of Q-Learning, SAQ-Learning and BQSA-Learning')

ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
ax.plot(Q_results.Epi_to_max, Q_results['Mean reward'], marker='o', linestyle='', ms=12, label='Q-Learning')
ax.plot(SA_Q_results.Epi_to_max, SA_Q_results['Mean reward'], marker='o', linestyle='', ms=12, label='SAQ-Learning')
ax.plot(bqsa_results2.Epi_to_max, bqsa_results2['Mean reward'], marker='o', linestyle='', ms=12, label='BQSA-Learning')

ax.legend()
fig = ax.get_figure()
fig.savefig('Comparison.png') 

plt.show()