User groups


In [146]:
request = "POST https://analyticsreporting.googleapis.com/v4/reports:batchGet?fields=reports(columnHeader%2Cdata(rows%2Ctotals))&key={YOUR_API_KEY}"
request = {
 "reportRequests": [
  {
   "viewId": "123303369",
   "dateRanges": [
    {
     "startDate": "2017-01-01",
     "endDate": "2017-04-30"
    }
   ],
   "metrics": [
    {
     "expression": "ga:sessions"
    },
    {
     "expression": "ga:sessionDuration"
    },
    {
     "expression": "ga:goal1Completions"
    },
    {
     "expression": "ga:bounceRate"
    }
   ],
   "dimensions": [
    {
     "name": "ga:city"
    },
    {
     "name": "ga:userAgeBracket"
    }
   ]
  }
 ]
}
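
The request above is stored but not executed in this notebook; the response was saved to data/TMRW_user_groups.json and is loaded in the next cell. As a rough sketch of how the same batchGet call could be sent directly, assuming a valid OAuth 2.0 access token (ACCESS_TOKEN is a hypothetical placeholder) and the requests library:

import requests

ACCESS_TOKEN = "..."  # assumed: an OAuth token with the Analytics readonly scope
url = "https://analyticsreporting.googleapis.com/v4/reports:batchGet"

# request_body is the dict defined in the cell above
response = requests.post(url,
                         json=request_body,
                         headers={"Authorization": "Bearer " + ACCESS_TOKEN})
report = response.json()  # same structure as the saved JSON file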

In [147]:
import json

# Load the saved API response
with open('data/TMRW_user_groups.json') as file:
    input_ugroups = json.load(file)

# Define dimensions list
input_ugroups_dimensions = input_ugroups['reports'][0]['columnHeader']['dimensions']

dimension_count = len(input_ugroups_dimensions)

# Define metrics list
input_ugroups_metrics = input_ugroups['reports'][0]['columnHeader']['metricHeader']['metricHeaderEntries']

def create_metric_list(raw_data):
    # Pull the metric names out of metricHeaderEntries
    return [item['name'] for item in raw_data]

input_ugroups_metrics = create_metric_list(input_ugroups_metrics)


# Create input data

input_ugroups_data = input_ugroups['reports'][0]['data']['rows']

input_ugroups_data


Out[147]:
[{'dimensions': ['Croydon', '18-24'],
  'metrics': [{'values': ['101',
     '41.584158415841586',
     '4',
     '3.9603960396039604']}]},
 {'dimensions': ['Croydon', '25-34'],
  'metrics': [{'values': ['334',
     '47.90419161676647',
     '17',
     '5.089820359281437']}]},
 {'dimensions': ['Croydon', '35-44'],
  'metrics': [{'values': ['223',
     '43.04932735426009',
     '7',
     '3.1390134529147984']}]},
 {'dimensions': ['Croydon', '45-54'],
  'metrics': [{'values': ['90',
     '45.55555555555556',
     '2',
     '2.2222222222222223']}]},
 {'dimensions': ['Croydon', '55-64'],
  'metrics': [{'values': ['32', '53.125', '1', '3.125']}]},
 {'dimensions': ['London', '18-24'],
  'metrics': [{'values': ['167',
     '49.700598802395206',
     '8',
     '4.790419161676647']}]},
 {'dimensions': ['London', '25-34'],
  'metrics': [{'values': ['842',
     '59.14489311163895',
     '24',
     '2.8503562945368173']}]},
 {'dimensions': ['London', '35-44'],
  'metrics': [{'values': ['482',
     '54.77178423236515',
     '14',
     '2.904564315352697']}]},
 {'dimensions': ['London', '45-54'],
  'metrics': [{'values': ['205',
     '55.60975609756098',
     '3',
     '1.4634146341463417']}]},
 {'dimensions': ['London', '55-64'],
  'metrics': [{'values': ['37', '48.64864864864865', '0', '0.0']}]}]

In [148]:
values_list = []
for group in input_ugroups_data:
    # Re-key each row by its "city, age bracket" combination
    new_dim_name = group['dimensions'][0] + ", " + group['dimensions'][1]
    group[new_dim_name] = group['metrics'][0]
    del group['dimensions']
    del group['metrics']
    
    # Metric values arrive as strings; cast them to floats
    group[new_dim_name]['values'] = list(map(float, group[new_dim_name]['values']))
    values_list.append(group[new_dim_name]['values'])
    
    # Prepend the label; values_list holds references to these same lists,
    # so its rows gain the label as their first element too
    group[new_dim_name]['values'].insert(0, new_dim_name)

input_ugroups_data


Out[148]:
[{'Croydon, 18-24': {'values': ['Croydon, 18-24',
    101.0,
    41.584158415841586,
    4.0,
    3.9603960396039604]}},
 {'Croydon, 25-34': {'values': ['Croydon, 25-34',
    334.0,
    47.90419161676647,
    17.0,
    5.089820359281437]}},
 {'Croydon, 35-44': {'values': ['Croydon, 35-44',
    223.0,
    43.04932735426009,
    7.0,
    3.1390134529147984]}},
 {'Croydon, 45-54': {'values': ['Croydon, 45-54',
    90.0,
    45.55555555555556,
    2.0,
    2.2222222222222223]}},
 {'Croydon, 55-64': {'values': ['Croydon, 55-64', 32.0, 53.125, 1.0, 3.125]}},
 {'London, 18-24': {'values': ['London, 18-24',
    167.0,
    49.700598802395206,
    8.0,
    4.790419161676647]}},
 {'London, 25-34': {'values': ['London, 25-34',
    842.0,
    59.14489311163895,
    24.0,
    2.8503562945368173]}},
 {'London, 35-44': {'values': ['London, 35-44',
    482.0,
    54.77178423236515,
    14.0,
    2.904564315352697]}},
 {'London, 45-54': {'values': ['London, 45-54',
    205.0,
    55.60975609756098,
    3.0,
    1.4634146341463417]}},
 {'London, 55-64': {'values': ['London, 55-64',
    37.0,
    48.64864864864865,
    0.0,
    0.0]}}]
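
The loop in the previous cell reshapes the raw rows in place. For reference, the same flattening could be done without mutating them; a minimal sketch (hypothetical flat_rows name) that would have to run on a freshly loaded copy of the report, since the loop deletes the 'dimensions' and 'metrics' keys as it goes:

flat_rows = [
    [", ".join(row['dimensions'])] + [float(v) for v in row['metrics'][0]['values']]
    for row in input_ugroups['reports'][0]['data']['rows']
]
# flat_rows has the same shape as values_list: [label] + the four metric values as floats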

In [149]:
# Build a dict of rounded metrics per user group.
# Values layout after the previous cell: [label, sessions, bounce rate, conversions, conversion rate]

ugroups_data = {}

for ugroup in input_ugroups_data:
    for gr in ugroup:
        ugroups_data[gr] = {'sessions':0,
                       'bounce_rate':0,
                       'conversions':0,
                       'conversion_rate':0}
        
        ugroups_data[gr]['sessions'] = round(float(ugroup[gr]['values'][1]),2)
        ugroups_data[gr]['bounce_rate'] = round(float(ugroup[gr]['values'][2]),2)
        ugroups_data[gr]['conversions'] = round(float(ugroup[gr]['values'][3]),2)
        ugroups_data[gr]['conversion_rate'] = round(float(ugroup[gr]['values'][4]),2)
        
ugroups_data


Out[149]:
{'Croydon, 18-24': {'bounce_rate': 41.58,
  'conversion_rate': 3.96,
  'conversions': 4.0,
  'sessions': 101.0},
 'Croydon, 25-34': {'bounce_rate': 47.9,
  'conversion_rate': 5.09,
  'conversions': 17.0,
  'sessions': 334.0},
 'Croydon, 35-44': {'bounce_rate': 43.05,
  'conversion_rate': 3.14,
  'conversions': 7.0,
  'sessions': 223.0},
 'Croydon, 45-54': {'bounce_rate': 45.56,
  'conversion_rate': 2.22,
  'conversions': 2.0,
  'sessions': 90.0},
 'Croydon, 55-64': {'bounce_rate': 53.12,
  'conversion_rate': 3.12,
  'conversions': 1.0,
  'sessions': 32.0},
 'London, 18-24': {'bounce_rate': 49.7,
  'conversion_rate': 4.79,
  'conversions': 8.0,
  'sessions': 167.0},
 'London, 25-34': {'bounce_rate': 59.14,
  'conversion_rate': 2.85,
  'conversions': 24.0,
  'sessions': 842.0},
 'London, 35-44': {'bounce_rate': 54.77,
  'conversion_rate': 2.9,
  'conversions': 14.0,
  'sessions': 482.0},
 'London, 45-54': {'bounce_rate': 55.61,
  'conversion_rate': 1.46,
  'conversions': 3.0,
  'sessions': 205.0},
 'London, 55-64': {'bounce_rate': 48.65,
  'conversion_rate': 0.0,
  'conversions': 0.0,
  'sessions': 37.0}}

In [150]:
rows = list(ugroups_data.keys())
rows


Out[150]:
['Croydon, 18-24',
 'Croydon, 25-34',
 'Croydon, 35-44',
 'Croydon, 45-54',
 'Croydon, 55-64',
 'London, 18-24',
 'London, 25-34',
 'London, 35-44',
 'London, 45-54',
 'London, 55-64']

In [151]:
from collections import OrderedDict

# Column headers: the combined dimension plus each metric name,
# de-duplicated while preserving order
columns = ['city_age']
for u in ugroups_data:
    for metric in ugroups_data[u]:
        columns.append(metric)

columns = list(OrderedDict.fromkeys(columns))
columns


Out[151]:
['city_age', 'sessions', 'bounce_rate', 'conversions', 'conversion_rate']

In [152]:
import pandas as pd

In [153]:
df = pd.DataFrame(values_list,
                  columns = columns)

# Round-trip through JSON so pandas re-infers dtypes
# (whole-number columns come back as integers)
table = pd.read_json(df.to_json(orient='split'), orient='split')
table.conversion_rate = round(table.conversion_rate,2)
table.bounce_rate = round(table.bounce_rate,2)
table


Out[153]:
city_age sessions bounce_rate conversions conversion_rate
0 Croydon, 18-24 101 41.58 4 3.96
1 Croydon, 25-34 334 47.90 17 5.09
2 Croydon, 35-44 223 43.05 7 3.14
3 Croydon, 45-54 90 45.56 2 2.22
4 Croydon, 55-64 32 53.12 1 3.12
5 London, 18-24 167 49.70 8 4.79
6 London, 25-34 842 59.14 24 2.85
7 London, 35-44 482 54.77 14 2.90
8 London, 45-54 205 55.61 3 1.46
9 London, 55-64 37 48.65 0 0.00
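
The to_json/read_json round-trip mainly lets pandas re-infer column dtypes, which is why sessions and conversions display as integers above. A minimal alternative sketch with explicit casts (hypothetical table_direct name):

table_direct = pd.DataFrame(values_list, columns=columns)
table_direct[['sessions', 'conversions']] = table_direct[['sessions', 'conversions']].astype(int)
table_direct[['bounce_rate', 'conversion_rate']] = table_direct[['bounce_rate', 'conversion_rate']].round(2)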

In [154]:
table2 = pd.DataFrame([['Croydon, 18-24',0],['Croydon, 25-34',1],['London, 18-24',0],['Croydon, 45-54',1]],
                       columns = ['city_age','cluster'])

table2


Out[154]:
city_age cluster
0 Croydon, 18-24 0
1 Croydon, 25-34 1
2 London, 18-24 0
3 Croydon, 45-54 1

In [155]:
data = table2.merge(table, on='city_age')
data


Out[155]:
city_age cluster sessions bounce_rate conversions conversion_rate
0 Croydon, 18-24 0 101 41.58 4 3.96
1 Croydon, 25-34 1 334 47.90 17 5.09
2 London, 18-24 0 167 49.70 8 4.79
3 Croydon, 45-54 1 90 45.56 2 2.22

In [156]:
# One-dimensional samples: sessions per group
samples1 = []
for i in range(0,len(table)):
    samples1.append([table.sessions[i]])
samples1


Out[156]:
[[101], [334], [223], [90], [32], [167], [842], [482], [205], [37]]

In [157]:
mv = sum(table.sessions)/len(samples1)
mv


Out[157]:
251.30000000000001
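
The same figure is available directly from pandas; for example:

table.sessions.mean()   # 251.3, the mean sessions per city/age group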

In [158]:
import random
import math

NUM_CLUSTERS = 3
TOTAL_DATA = len(samples1)
LOWEST_SAMPLE_POINT = samples1.index(min(samples1)) #element 4 of SAMPLES (32 sessions).
HIGHEST_SAMPLE_POINT = samples1.index(max(samples1)) #element 6 of SAMPLES (842 sessions).
Middle_SAMPLE_POINT = 2 #hand-picked third seed (223 sessions).
BIG_NUMBER = math.pow(10, 10)


SAMPLES = samples1
data1 = []
centroids = []

class DataPoint:
    def __init__(self, x):
        self.x = x
          
    def set_x(self, x):
        self.x = x
    
    def get_x(self):
        return self.x
      
    def set_cluster(self, clusterNumber):
        self.clusterNumber = clusterNumber
    
    def get_cluster(self):
        return self.clusterNumber

class Centroid:
    def __init__(self, x):
        self.x = x
    
    def set_x(self, x):
        self.x = x
    
    def get_x(self):
        return self.x
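
DataPoint and Centroid are plain value holders with getters and setters. On Python 3.7+ the same thing could be written more compactly as dataclasses; a sketch (hypothetical DataPointDC/CentroidDC names, not used below):

from dataclasses import dataclass
from typing import Optional

@dataclass
class DataPointDC:
    x: float
    clusterNumber: Optional[int] = None   # None until the point is assigned to a cluster

@dataclass
class CentroidDC:
    x: float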

In [159]:
def initialize_centroids():
    # Seed the centroids with the lowest, highest and hand-picked middle samples.
    # In this example: 32, 842 and 223 sessions.
    centroids.append(Centroid(SAMPLES[LOWEST_SAMPLE_POINT][0]))
    centroids.append(Centroid(SAMPLES[HIGHEST_SAMPLE_POINT][0]))
    centroids.append(Centroid(SAMPLES[Middle_SAMPLE_POINT][0]))
    
    print("Centroids initialized at:")
    print("(", centroids[0].get_x(),")")
    print("(", centroids[1].get_x(),")")
    print("(", centroids[2].get_x(),")")
    print()
    return
#print(initialize_centroids())
#print(centroids.append(Centroid(SAMPLES[HIGHEST_SAMPLE_POINT][0])))

In [160]:
def initialize_datapoints():
    # DataPoint x values are taken from the SAMPLES array.
    # The points at LOWEST_SAMPLE_POINT, HIGHEST_SAMPLE_POINT and Middle_SAMPLE_POINT
    # are pre-assigned to clusters 0, 1 and 2; every other point starts unassigned.
    for i in range(TOTAL_DATA):
        newPoint = DataPoint(SAMPLES[i][0])
        
        if(i == LOWEST_SAMPLE_POINT):
            newPoint.set_cluster(0)
        elif(i == HIGHEST_SAMPLE_POINT):
            newPoint.set_cluster(1)
        elif(i == Middle_SAMPLE_POINT):
            newPoint.set_cluster(2)
        else:
            newPoint.set_cluster(None)
            
        data1.append(newPoint)
    
    return

In [161]:
def get_distance(dataPointX, centroidX):
    # One-dimensional Euclidean distance, i.e. the absolute difference
    return math.sqrt(math.pow((centroidX - dataPointX), 2))

In [162]:
def recalculate_centroids():
    totalX = 0
    totalInCluster = 0
    
    for j in range(NUM_CLUSTERS):
        for k in range(len(data1)):
            if(data1[k].get_cluster() == j):
                totalX += data1[k].get_x()
                totalInCluster += 1
        
        if(totalInCluster > 0):
            centroids[j].set_x(totalX / totalInCluster)
               
    return

print(recalculate_centroids())


None

In [163]:
def update_clusters():
    isStillMoving = 0
    
    for i in range(TOTAL_DATA):
        bestMinimum = BIG_NUMBER
        currentCluster = 0
        
        for j in range(NUM_CLUSTERS):
            distance = get_distance(data1[i].get_x(), centroids[j].get_x())
            if(distance < bestMinimum):
                bestMinimum = distance
                currentCluster = j
        
        data1[i].set_cluster(currentCluster)
        
        if(data1[i].get_cluster() is None or data1[i].get_cluster() != currentCluster):
            data1[i].set_cluster(currentCluster)
            isStillMoving = 1
    
    return isStillMoving

In [164]:
def perform_kmeans():
    isStillMoving = 1
    
    initialize_centroids()
    
    initialize_datapoints()
    
    while(isStillMoving):
        recalculate_centroids()
        isStillMoving = update_clusters()
    
    return

perform_kmeans()


Centroids initialized at:
( 32 )
( 842 )
( 223 )


In [165]:
def print_results():
    for i in range(NUM_CLUSTERS):
        print("Cluster ", i, " includes:")
        for j in range(TOTAL_DATA):
            if(data1[j].get_cluster() == i):
                s = [data1[j].get_x()]
                #print("(", data1[j].get_x(), ")")
                print(s)
        print()
    
    return


print_results()
#print(data1[j].get_x())


Cluster  0  includes:
[101]
[90]
[32]
[167]
[37]

Cluster  1  includes:
[842]
[482]

Cluster  2  includes:
[334]
[223]
[205]
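
For comparison, scikit-learn's KMeans can cluster the same one-dimensional session counts in a few lines. A sketch, assuming scikit-learn is installed (its cluster numbering will generally differ from the hand-rolled version above):

from sklearn.cluster import KMeans

# samples1 is already a list of single-element [sessions] lists, i.e. shape (10, 1)
km = KMeans(n_clusters=3, n_init=10, random_state=0)
labels = km.fit_predict(samples1)
list(zip(table.city_age, labels))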


In [166]:
table.sort_values(by='conversion_rate')


Out[166]:
city_age sessions bounce_rate conversions conversion_rate
9 London, 55-64 37 48.65 0 0.00
8 London, 45-54 205 55.61 3 1.46
3 Croydon, 45-54 90 45.56 2 2.22
6 London, 25-34 842 59.14 24 2.85
7 London, 35-44 482 54.77 14 2.90
4 Croydon, 55-64 32 53.12 1 3.12
2 Croydon, 35-44 223 43.05 7 3.14
0 Croydon, 18-24 101 41.58 4 3.96
5 London, 18-24 167 49.70 8 4.79
1 Croydon, 25-34 334 47.90 17 5.09

In [167]:
sum(table.conversion_rate)/10


Out[167]:
2.9530000000000003

In [168]:
table.conversions[5]


Out[168]:
8

In [169]:
# One-dimensional samples: conversion rate per group
samples1 = []
for i in range(0,len(table)):
    samples1.append([table.conversion_rate[i]])
samples1


Out[169]:
[[3.96],
 [5.0899999999999999],
 [3.1400000000000001],
 [2.2200000000000002],
 [3.1200000000000001],
 [4.79],
 [2.8500000000000001],
 [2.8999999999999999],
 [1.46],
 [0.0]]

In [170]:
# Re-define the samples with full-precision conversion rates,
# dropping the zero-conversion group (London, 55-64)
samples1=[[3.9603960396039604],
 [5.0898203592814371],
 [3.1390134529147984],
 [2.2222222222222223],
 [3.125],
 [4.7904191616766472],
 [2.8503562945368173],
 [2.904564315352697],
 [1.4634146341463417]]

In [171]:
samples1[4]


Out[171]:
[3.125]

In [172]:
import random
import math

NUM_CLUSTERS = 3
TOTAL_DATA = len(samples1)
LOWEST_SAMPLE_POINT = samples1.index(min(samples1)) #element 8 of SAMPLES (1.46).
HIGHEST_SAMPLE_POINT = samples1.index(max(samples1)) #element 1 of SAMPLES (5.09).
Middle_SAMPLE_POINT = 4 #hand-picked third seed (3.125).
BIG_NUMBER = math.pow(10, 10)


SAMPLES = samples1
data1 = []
centroids = []

class DataPoint:
    def __init__(self, x):
        self.x = x
          
    def set_x(self, x):
        self.x = x
    
    def get_x(self):
        return self.x
      
    def set_cluster(self, clusterNumber):
        self.clusterNumber = clusterNumber
    
    def get_cluster(self):
        return self.clusterNumber

class Centroid:
    def __init__(self, x):
        self.x = x
    
    def set_x(self, x):
        self.x = x
    
    def get_x(self):
        return self.x

In [173]:
def initialize_centroids():
    # Seed the centroids with the lowest, highest and hand-picked middle samples.
    # In this example: 1.46, 5.09 and 3.125 (conversion rates).
    centroids.append(Centroid(SAMPLES[LOWEST_SAMPLE_POINT][0]))
    centroids.append(Centroid(SAMPLES[HIGHEST_SAMPLE_POINT][0]))
    centroids.append(Centroid(SAMPLES[Middle_SAMPLE_POINT][0]))
    
    print("Centroids initialized at:")
    print("(", centroids[0].get_x(),")")
    print("(", centroids[1].get_x(),")")
    print("(", centroids[2].get_x(),")")
    print()
    return
#print(initialize_centroids())
#print(centroids.append(Centroid(SAMPLES[HIGHEST_SAMPLE_POINT][0])))

In [174]:
def initialize_datapoints():
    # DataPoint x values are taken from the SAMPLES array.
    # The points at LOWEST_SAMPLE_POINT, HIGHEST_SAMPLE_POINT and Middle_SAMPLE_POINT
    # are pre-assigned to clusters 0, 1 and 2; every other point starts unassigned.
    for i in range(TOTAL_DATA):
        newPoint = DataPoint(SAMPLES[i][0])
        
        if(i == LOWEST_SAMPLE_POINT):
            newPoint.set_cluster(0)
        elif(i == HIGHEST_SAMPLE_POINT):
            newPoint.set_cluster(1)
        elif(i == Middle_SAMPLE_POINT):
            newPoint.set_cluster(2)
        else:
            newPoint.set_cluster(None)
            
        data1.append(newPoint)
    
    return

In [175]:
def get_distance(dataPointX, centroidX):
    # One-dimensional Euclidean distance, i.e. the absolute difference
    return math.sqrt(math.pow((centroidX - dataPointX), 2))

In [176]:
def recalculate_centroids():
    totalX = 0
    totalInCluster = 0
    
    for j in range(NUM_CLUSTERS):
        for k in range(len(data1)):
            if(data1[k].get_cluster() == j):
                totalX += data1[k].get_x()
                totalInCluster += 1
        
        if(totalInCluster > 0):
            centroids[j].set_x(totalX / totalInCluster)
               
    return

print(recalculate_centroids())


None

In [177]:
def update_clusters():
    isStillMoving = 0
    
    for i in range(TOTAL_DATA):
        bestMinimum = BIG_NUMBER
        currentCluster = 0
        
        for j in range(NUM_CLUSTERS):
            distance = get_distance(data1[i].get_x(), centroids[j].get_x())
            if(distance < bestMinimum):
                bestMinimum = distance
                currentCluster = j
        
        data1[i].set_cluster(currentCluster)
        
        if(data1[i].get_cluster() is None or data1[i].get_cluster() != currentCluster):
            data1[i].set_cluster(currentCluster)
            isStillMoving = 1
    
    return isStillMoving

In [178]:
def perform_kmeans():
    isStillMoving = 1
    
    initialize_centroids()
    
    initialize_datapoints()
    
    while(isStillMoving):
        recalculate_centroids()
        isStillMoving = update_clusters()
    
    return

perform_kmeans()


Centroids initialized at:
( 1.4634146341463417 )
( 5.089820359281437 )
( 3.125 )


In [179]:
def print_results():
    for i in range(NUM_CLUSTERS):
        print("Cluster ", i, " includes:")
        for j in range(TOTAL_DATA):
            if(data1[j].get_cluster() == i):
                print("(", data1[j].get_x(), ")")
        #print(data1[j].get_x())
        print()
    
    return


print_results()


Cluster  0  includes:
( 2.2222222222222223 )
( 1.4634146341463417 )

Cluster  1  includes:
( 3.9603960396039604 )
( 5.089820359281437 )
( 4.790419161676647 )

Cluster  2  includes:
( 3.1390134529147984 )
( 3.125 )
( 2.8503562945368173 )
( 2.904564315352697 )


In [222]:
def print_results():
    # Return [cluster, conversion_rate] pairs instead of printing them
    result_list = []
    
    for i in range(NUM_CLUSTERS):
        for j in range(TOTAL_DATA):
            if(data1[j].get_cluster() == i):
                result_list.append([data1[j].get_cluster(), data1[j].get_x()])
                
    return result_list


print_results()


Out[222]:
[[0, 2.2222222222222223],
 [0, 1.4634146341463417],
 [1, 3.9603960396039604],
 [1, 5.089820359281437],
 [1, 4.790419161676647],
 [2, 3.1390134529147984],
 [2, 3.125],
 [2, 2.8503562945368173],
 [2, 2.904564315352697]]
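
The [cluster, conversion_rate] pairs returned above drop straight into a DataFrame, which makes a per-cluster summary easy; a minimal sketch (hypothetical clusters_df name):

clusters_df = pd.DataFrame(print_results(), columns=['cluster', 'conversion_rate'])
clusters_df.groupby('cluster')['conversion_rate'].agg(['count', 'mean', 'min', 'max'])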

In [183]:
table.sort_values(by='sessions')


Out[183]:
city_age sessions bounce_rate conversions conversion_rate
4 Croydon, 55-64 32 53.12 1 3.12
9 London, 55-64 37 48.65 0 0.00
3 Croydon, 45-54 90 45.56 2 2.22
0 Croydon, 18-24 101 41.58 4 3.96
5 London, 18-24 167 49.70 8 4.79
8 London, 45-54 205 55.61 3 1.46
2 Croydon, 35-44 223 43.05 7 3.14
1 Croydon, 25-34 334 47.90 17 5.09
7 London, 35-44 482 54.77 14 2.90
6 London, 25-34 842 59.14 24 2.85

In [184]:
table.sort_values(by='conversion_rate')


Out[184]:
city_age sessions bounce_rate conversions conversion_rate
9 London, 55-64 37 48.65 0 0.00
8 London, 45-54 205 55.61 3 1.46
3 Croydon, 45-54 90 45.56 2 2.22
6 London, 25-34 842 59.14 24 2.85
7 London, 35-44 482 54.77 14 2.90
4 Croydon, 55-64 32 53.12 1 3.12
2 Croydon, 35-44 223 43.05 7 3.14
0 Croydon, 18-24 101 41.58 4 3.96
5 London, 18-24 167 49.70 8 4.79
1 Croydon, 25-34 334 47.90 17 5.09
