In [1]:
import requests # a better package than urllib2
In [55]:
def my_mean(input_list):
    # hand-rolled mean: sum the elements and divide by the count
    list_sum = 0
    list_count = 0
    for el in input_list:
        list_sum += int(el)   # coerce each element to an integer before summing
        list_count += 1
    # float() guards against integer (floor) division under Python 2
    return list_sum / float(list_count)
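A quick spot check (not from the original notebook) confirms the function on a small, hand-checkable list:

my_mean([2, 4, 6, 8])    # (2 + 4 + 6 + 8) / 4 = 5.0
my_mean(["3", "5"])      # numeric strings are coerced by int(): (3 + 5) / 2 = 4.0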
In [57]:
def my_median(input_list):
    # hand-rolled median: sort the list and take the middle element
    sorted_list = sorted(input_list)
    list_length = len(sorted_list)
    half_length = int(list_length / 2)
    # for an even-length list this returns the upper of the two middle
    # values rather than their average (np.median averages them)
    return float(sorted_list[half_length])
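A spot check (not from the original notebook) shows the odd- and even-length behavior; for an even count this implementation returns the upper of the two middle values rather than their average:

my_median([3, 1, 2])      # sorted middle value -> 2.0
my_median([1, 2, 3, 4])   # upper middle -> 3.0 (np.median would give 2.5)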
In [26]:
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [27]:
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key=%s" % api_key
In [28]:
r = requests.get(url)
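An equivalent way to make the same call (a sketch, not the notebook's code) is to let requests build and URL-encode the query string, then check the status code before parsing the response:

r = requests.get(
    "http://api.nytimes.com/svc/search/v2/articlesearch.json",
    params={"q": "gay marriage", "api-key": api_key},
)
r.status_code   # 200 indicates the request succeeded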
In [29]:
wc_list = []
for article in r.json()['response']['docs']:
    wc_list.append(article['word_count'])   # collect each article's word count
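The same extraction can also be written as a list comprehension (a stylistic alternative, not the notebook's code):

wc_list = [article['word_count'] for article in r.json()['response']['docs']]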
In [54]:
wc_list = [int(i) for i in wc_list if i is not None]   # drop missing word counts, coerce the rest to int
wc_list
Out[54]:
In [34]:
my_mean(wc_list)
Out[34]:
In [35]:
import numpy as np
In [36]:
np.mean(wc_list)
Out[36]:
In [58]:
my_median(wc_list)
Out[58]:
In [38]:
np.median(wc_list)
Out[38]:
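Note that for an even number of articles my_median and np.median can disagree, since np.median averages the two middle values while my_median takes the upper one; a minimal illustration (not from the notebook):

np.median([1, 2, 3, 4])   # 2.5
my_median([1, 2, 3, 4])   # 3.0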