In [1]:
import requests # a better package than urllib2
In [2]:
def my_mean(input_list):
    # Running-total implementation of the arithmetic mean.
    list_sum = 0
    list_count = 0
    for el in input_list:
        list_sum += el
        list_count += 1
    return list_sum / list_count
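A quick sanity check (not part of the original run): on a small hand-built list, my_mean should agree with the mean worked out by hand.
In [ ]:
# Hypothetical example values: (3 + 5 + 10) / 3 == 6.0
my_mean([3, 5, 10])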
In [3]:
def my_median(input_list):
    # Naive median: assumes the list is already sorted and simply picks the middle element.
    list_length = len(input_list)
    return input_list[list_length // 2]
In [4]:
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [6]:
# Corrected misspelling in 'api-key'
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key=%s" % api_key
In [35]:
r = requests.get(url)
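An equivalent way to make the same request (a sketch, not from the original notebook) is to let requests build the query string from a params dict, which avoids hand-escaping the q value; api_key is the key defined above and the endpoint URL is unchanged.
In [ ]:
# Sketch: same call, with requests encoding the query string.
base_url = "http://api.nytimes.com/svc/search/v2/articlesearch.json"
r = requests.get(base_url, params={"q": "gay marriage", "api-key": api_key})
r.status_code  # 200 indicates a successful response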
In [39]:
wc_list = []
for article in r.json()['response']['docs']:
    wc_list.append(article['word_count'])
In [41]:
# The list contained None values and strings (not ints); clean it up here.
wc_list = [ int(i) for i in wc_list if i is not None ]
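To illustrate what that comprehension does (a toy example, not data from the API), string counts are cast to int and None entries are dropped.
In [ ]:
# Toy input resembling the raw 'word_count' values: strings plus a None.
[ int(i) for i in ["100", None, "250", "75"] if i is not None ]
# -> [100, 250, 75]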
In [43]:
my_mean(wc_list)
Out[43]:
In [44]:
import numpy as np
In [45]:
np.mean(wc_list)
Out[45]:
In [58]:
# Reworked the median function to sort the data and handle even-length lists.
def my_median(wc_list):
    # Sort first: the median is a positional statistic.
    sorted_list = sorted(wc_list)
    length = len(sorted_list)
    half_length = length // 2
    if length % 2 != 0:
        # Odd count: the single middle value is the median.
        return sorted_list[half_length]
    else:
        # Even count: average the two middle values.
        middles = [sorted_list[half_length - 1], sorted_list[half_length]]
        return my_mean(middles)
my_median(wc_list)
Out[58]:
In [54]:
np.median(wc_list)
Out[54]:
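As a final check (a sketch added here, not in the original run), my_median should match np.median on small odd- and even-length lists.
In [ ]:
odd_list = [7, 1, 5]        # sorted: [1, 5, 7] -> median 5
even_list = [8, 2, 6, 4]    # sorted: [2, 4, 6, 8] -> median 5.0
(my_median(odd_list), np.median(odd_list), my_median(even_list), np.median(even_list))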