In [1]:
#Importing dependencies
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
In [2]:
#Defining the contextual bandit environment: four four-armed bandits
class contextual_bandit():
    def __init__(self):
        self.state = 0
        #List out our bandits. Currently arms 4, 2, 1 and 1 (respectively) are the most optimal.
        self.bandits = np.array([[0.2,0,-0.0,-5],[0.1,-5,1,0.25],[-5,5,5,5],[-5,0.2,0,1]])
        self.num_bandits = self.bandits.shape[0]
        self.num_actions = self.bandits.shape[1]

    def getBandit(self):
        self.state = np.random.randint(0,len(self.bandits)) #Returns a random state for each episode.
        return self.state

    def pullArm(self,action):
        #Get a random number.
        bandit = self.bandits[self.state,action]
        result = np.random.randn(1)
        if result > bandit:
            #return a positive reward.
            return 1
        else:
            #return a negative reward.
            return -1
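Since pullArm draws result from a standard normal, the chance of a +1 reward for an arm with value b is 1 - Φ(b), so the most negative entry in each row is the best arm. A quick sanity check of those probabilities (a sketch added here, assuming scipy is available; it is not part of the original notebook):
In [ ]:
#Added sketch: probability of a +1 reward as a function of the bandit value.
from scipy.stats import norm
for b in [-5.0, -0.0, 0.2, 1.0, 5.0]:
    print("bandit value %5.1f -> P(reward = +1) = %.3f" % (b, 1 - norm.cdf(b)))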
In [3]:
#Defining the policy-based agent. It takes the current state as input and returns an action.
class agent():
    def __init__(self, lr, s_size, a_size):
        #These lines establish the feed-forward part of the network. The agent takes a state and produces an action.
        self.state_in = tf.placeholder(shape=[1],dtype=tf.int32)
        state_in_OH = slim.one_hot_encoding(self.state_in,s_size)
        output = slim.fully_connected(state_in_OH,a_size,\
            biases_initializer=None,activation_fn=tf.nn.sigmoid,weights_initializer=tf.ones_initializer())
        self.output = tf.reshape(output,[-1])
        self.chosen_action = tf.argmax(self.output,0)
        #The next six lines establish the training procedure. We feed the reward and chosen action into the network
        #to compute the loss, and use it to update the network.
        self.reward_holder = tf.placeholder(shape=[1],dtype=tf.float32)
        self.action_holder = tf.placeholder(shape=[1],dtype=tf.int32)
        self.responsible_weight = tf.slice(self.output,self.action_holder,[1])
        self.loss = -(tf.log(self.responsible_weight)*self.reward_holder)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
        self.update = optimizer.minimize(self.loss)
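The loss -log(responsible_weight) * reward is a REINFORCE-style policy-gradient loss: with a one-hot state and ones-initialised weights, each state effectively owns one row of the weight matrix, and only the sigmoid of the chosen entry enters the loss. A rough numpy sketch of a single update under those assumptions (added for illustration, not part of the original notebook):
In [ ]:
#Added sketch: one gradient-descent step on the weight of the chosen (state, action) pair.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

W = np.ones((4, 4))            #s_size x a_size, as tf.ones_initializer() produces
lr, s, a, reward = 0.001, 2, 1, 1.0
prob = sigmoid(W[s, a])        #the "responsible weight" that enters the loss
grad = -reward * (1.0 - prob)  #d/dW of -log(sigmoid(W)) * reward
W[s, a] -= lr * grad           #a +1 reward nudges the weight (and the action's probability) up
print(prob, W[s])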
In [4]:
#Training the network: get a state from the environment, take an action, and receive a reward
tf.reset_default_graph() #Clear the Tensorflow graph.
cBandit = contextual_bandit() #Load the bandits.
myAgent = agent(lr=0.001,s_size=cBandit.num_bandits,a_size=cBandit.num_actions) #Load the agent.
weights = tf.trainable_variables()[0] #The weights we will evaluate to look into the network.
total_episodes = 10000 #Set total number of episodes to train agent on.
total_reward = np.zeros([cBandit.num_bandits,cBandit.num_actions]) #Set scoreboard for bandits to 0.
e = 0.1 #Set the chance of taking a random action.
init = tf.global_variables_initializer()
# Launch the tensorflow graph
with tf.Session() as sess:
    sess.run(init)
    i = 0
    while i < total_episodes:
        s = cBandit.getBandit() #Get a state from the environment.
        #Choose either a random action or one from our network.
        if np.random.rand(1) < e:
            action = np.random.randint(cBandit.num_actions)
        else:
            action = sess.run(myAgent.chosen_action,feed_dict={myAgent.state_in:[s]})
        reward = cBandit.pullArm(action) #Get our reward for taking an action given a bandit.
        #Update the network.
        feed_dict = {myAgent.reward_holder:[reward],myAgent.action_holder:[action],myAgent.state_in:[s]}
        _,ww = sess.run([myAgent.update,weights], feed_dict=feed_dict)
        #Update our running tally of scores.
        total_reward[s,action] += reward
        if i % 500 == 0:
            print("Mean reward for each of the " + str(cBandit.num_bandits) + " bandits: " + str(np.mean(total_reward,axis=1)))
        i += 1

for a in range(cBandit.num_bandits):
    print("The agent thinks action " + str(np.argmax(ww[a])+1) + " for bandit " + str(a+1) + " is the most promising....")
    if np.argmax(ww[a]) == np.argmin(cBandit.bandits[a]):
        print("...and it was right!")
    else:
        print("...and it was wrong!")
In [5]:
#Now assume each bandit is a person and each arm is a film genre.
#Create a dataset with the ratings of 4 persons for 4 different movie genres.
import pandas as pd
genre_names = ["Adventure", "Sci-Fi", "Romance", "Horror"]
person_names = ["person1", "person2", "person3", "person4"]
df = pd.DataFrame(index = person_names, columns = genre_names)
In [6]:
#Let person 1 rate each genre on a scale of 1 to 5
df.loc['person1'] = pd.Series({'Adventure':1, 'Sci-Fi':5, 'Romance':2, 'Horror':1})
#Similarly for others
df.loc['person2'] = pd.Series({'Adventure':1, 'Sci-Fi':2, 'Romance':1, 'Horror':5})
df.loc['person3'] = pd.Series({'Adventure':5, 'Sci-Fi':3, 'Romance':2, 'Horror':2})
df.loc['person4'] = pd.Series({'Adventure':1, 'Sci-Fi':2, 'Romance':5, 'Horror':1})
In [7]:
#Looking at the dataset
df
Out[7]:
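         Adventure  Sci-Fi  Romance  Horror
person1          1       5        2       1
person2          1       2        1       5
person3          5       3        2       2
person4          1       2        5       1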
In [8]:
#Now represent this dataset as a bandit problem. The lower the bandit value, the more likely a positive reward.
#Negate all the (positive) ratings and train the bandit problem defined above.
class contextual_bandit():
    def __init__(self):
        self.state = 0
        #Bandits are persons and arms are genres. Currently the genres Sci-Fi, Horror, Adventure and Romance
        #(respectively) are the most optimal.
        self.bandits = np.array([[-1,-5,-2,-1],[-1,-2,-1,-5],[-5,-3,-2,-2],[-1,-2,-5,-1]])
        self.num_bandits = self.bandits.shape[0]
        self.num_actions = self.bandits.shape[1]

    def getBandit(self):
        self.state = np.random.randint(0,len(self.bandits)) #Returns a random state for each episode.
        return self.state

    def pullArm(self,action):
        #Get a random number.
        bandit = self.bandits[self.state,action]
        result = np.random.randn(1)
        if result > bandit:
            #return a positive reward.
            return 1
        else:
            #return a negative reward.
            return -1
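The hard-coded matrix above is just the negated ratings table. If the DataFrame changes, the same matrix can be derived from it directly; a sketch of that substitution (added here, not part of the original notebook):
In [ ]:
#Added sketch: derive the bandit matrix from the ratings DataFrame instead of hard-coding it.
bandits_from_df = -df.values.astype(np.float64)
print(bandits_from_df)
#Inside contextual_bandit.__init__ this could replace the literal array:
#    self.bandits = -df.values.astype(np.float64)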
In [9]:
#Training the network: get a state from the environment, take an action, and receive a reward
tf.reset_default_graph() #Clear the Tensorflow graph.
cBandit = contextual_bandit() #Load the bandits.
myAgent = agent(lr=0.001,s_size=cBandit.num_bandits,a_size=cBandit.num_actions) #Load the agent.
weights = tf.trainable_variables()[0] #The weights we will evaluate to look into the network.
total_episodes = 10000 #Set total number of episodes to train agent on.
total_reward = np.zeros([cBandit.num_bandits,cBandit.num_actions]) #Set scoreboard for bandits to 0.
e = 0.1 #Set the chance of taking a random action.
init = tf.global_variables_initializer()
# Launch the tensorflow graph
with tf.Session() as sess:
    sess.run(init)
    i = 0
    while i < total_episodes:
        s = cBandit.getBandit() #Get a state from the environment.
        #Choose either a random action or one from our network.
        if np.random.rand(1) < e:
            action = np.random.randint(cBandit.num_actions)
        else:
            action = sess.run(myAgent.chosen_action,feed_dict={myAgent.state_in:[s]})
        reward = cBandit.pullArm(action) #Get our reward for taking an action given a bandit.
        #Update the network.
        feed_dict = {myAgent.reward_holder:[reward],myAgent.action_holder:[action],myAgent.state_in:[s]}
        _,ww = sess.run([myAgent.update,weights], feed_dict=feed_dict)
        #Update our running tally of scores.
        total_reward[s,action] += reward
        if i % 500 == 0:
            print("Mean reward for each of the " + str(cBandit.num_bandits) + " persons: " + str(np.mean(total_reward,axis=1)))
        i += 1

for a in range(cBandit.num_bandits):
    print("The agent thinks genre " + str(np.argmax(ww[a])+1) + " for person " + str(a+1) + " is the most promising....")
    if np.argmax(ww[a]) == np.argmin(cBandit.bandits[a]):
        print("...and it was right!")
    else:
        print("...and it was wrong!")
In [ ]:
#The agent does not get it right for every person. The use case may be mis-specified or the data may need some preprocessing.
#Will have to learn more about it.
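One plausible reason (an added guess, not verified here): with a standard-normal draw, even the "bad" arms with values -1 or -2 return +1 roughly 84-98% of the time, so the reward gap between them and the rated-5 genre is small, and 10,000 episodes spread over 4 persons may not resolve it. Rescaling the negated ratings, for example centering them per person, widens that gap; a sketch of such preprocessing:
In [ ]:
#Added sketch: reward probabilities for the raw values, then a per-person centering step
#that pushes low-rated genres toward mostly negative rewards.
from scipy.stats import norm
for b in [-1.0, -2.0, -5.0]:
    print("value %5.1f -> P(reward = +1) = %.3f" % (b, 1 - norm.cdf(b)))

ratings = df.astype(float)
centered_bandits = -(ratings.sub(ratings.mean(axis=1), axis=0))  #centre per person, then negate
print(centered_bandits.values)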