In [5]:
import requests # a better package than urllib2
In [6]:
def my_mean(input_list):
    """Return the arithmetic mean of a list of numbers as a float.

    Raises:
        ValueError: if input_list is empty (the mean is undefined).
    """
    if not input_list:
        raise ValueError("my_mean() arg is an empty list")
    # float() keeps true division under Python 2; harmless under Python 3
    return sum(input_list) / float(len(input_list))
In [42]:
def my_median(input_list):
    """Return the median of a list of numbers.

    Unlike list.sort(), this works on a sorted *copy*, so the caller's
    list is not modified.

    Raises:
        ValueError: if input_list is empty (the median is undefined).
    """
    if not input_list:
        raise ValueError("my_median() arg is an empty list")
    ordered = sorted(input_list)  # sorted copy — don't mutate the caller's list
    # // is floor division: gives an int index on Python 3 (plain / yields a
    # float there, which raised TypeError when used as a list index)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 0:
        # even length: average the two middle elements; 2.0 forces float division
        return (ordered[mid] + ordered[mid - 1]) / 2.0
    return ordered[mid]
In [2]:
# SECURITY: API key hardcoded and committed in notebook output — rotate this
# key and load it from an environment variable (os.environ) or a secrets
# manager instead of embedding it in source.
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [3]:
# NOTE(review): plain HTTP and the API key in the query string — prefer HTTPS
# and passing the key as a request parameter/header so it doesn't end up in logs.
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key=%s" % api_key # interpolate api_key into the request URL
In [8]:
# timeout keeps the notebook from hanging forever on an unresponsive API
r = requests.get(url, timeout=30)
r.raise_for_status()  # fail fast with a clear error on a non-2xx response
In [10]:
# word_count comes back from the API as a string — coerce each value to int
wc_list = [int(article['word_count']) for article in r.json()['response']['docs']]
In [11]:
# mean word count via the hand-rolled helper (cross-checked against np.mean below)
my_mean(wc_list)
Out[11]:
In [12]:
import numpy as np
In [13]:
# cross-check: numpy's mean should agree with my_mean above
np.mean(wc_list)
Out[13]:
In [43]:
# median word count via the hand-rolled helper (cross-checked against np.median below)
my_median(wc_list)
Out[43]:
In [28]:
# cross-check: numpy's median should agree with my_median above
np.median(wc_list)
Out[28]: