In [1]:
import requests # a better package than urllib2
In [23]:
def my_mean(input_list):
    """Return the arithmetic mean of the numbers in input_list.

    Args:
        input_list: a non-empty sequence of numbers.

    Returns:
        The mean as a float (true division).

    Raises:
        ValueError: if input_list is empty (the original version crashed
            with ZeroDivisionError in that case).
    """
    if len(input_list) == 0:
        # Explicit guard instead of dividing by a zero count.
        raise ValueError("my_mean() requires at least one value")
    # sum()/len() replaces the original manual accumulation loop.
    return sum(input_list) / len(input_list)
Out[23]:
In [34]:
import math  # kept from the original cell (no longer needed by the fix below)

def my_median(input_list):
    """Return the median of the values in input_list.

    Fixes two defects in the original version:
    - the input is sorted first (the original silently assumed a
      pre-sorted list);
    - for an even number of values, the two middle values are averaged
      instead of returning only the upper-middle element.

    Args:
        input_list: a non-empty sequence of numbers.

    Raises:
        ValueError: if input_list is empty.
    """
    if not input_list:
        raise ValueError("my_median() requires at least one value")
    # Work on a sorted copy so the caller's list is not mutated.
    ordered = sorted(input_list)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    # Even count: the median is the average of the two middle elements.
    return (ordered[mid - 1] + ordered[mid]) / 2
In [35]:
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [36]:
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key=%s" % api_key
In [37]:
r = requests.get(url)
In [60]:
# Collect the word count of each returned article, skipping articles where
# the field is missing (None). A list comprehension replaces the original
# append loop.
docs = r.json()['response']['docs']
wc_list = [article['word_count'] for article in docs if article['word_count'] is not None]
print(wc_list)
In [61]:
# Normalise the word counts to ints once, then sort numerically.
wc_list = [int(i) for i in wc_list]
# The original passed key=int here, which is redundant: every element is
# already an int after the cast above.
wc_list.sort()
In [62]:
my_mean(wc_list)
Out[62]:
In [63]:
import numpy as np
In [64]:
np.mean(wc_list)
Out[64]:
In [65]:
my_median(wc_list)
Out[65]:
In [66]:
np.median(wc_list)
Out[66]: