In [1]:
import os  # read the API key from the environment instead of hardcoding it

import requests  # a better package than urllib2
In [21]:
def my_mean(input_list):
    """Return the arithmetic mean of the int-convertible elements of input_list.

    Elements that cannot be converted with int() (e.g. None, 'N/A') are
    skipped, matching the original best-effort behavior.

    Raises:
        ValueError: if input_list contains no int-convertible elements
            (the original crashed here with ZeroDivisionError).
    """
    list_sum = 0
    list_count = 0
    for el in input_list:
        try:
            list_sum += int(el)
        except (TypeError, ValueError):
            # Only catch conversion failures; the original bare `except`
            # silently swallowed every error, hiding real bugs.
            continue
        list_count += 1
    if list_count == 0:
        raise ValueError("my_mean() requires at least one numeric element")
    return list_sum / list_count
In [56]:
def my_median(input_list):
    """Return the median of input_list.

    Fixes two bugs in the original implementation:
    - the list was never sorted, so the function returned the middle element
      of whatever order the input happened to be in, not the median;
    - for even-length lists, one of the two middle values was ignored.

    Elements are coerced with int(), matching the original's int() cast.

    Returns:
        The middle value (int) for odd-length input; the average of the two
        middle values (float) for even-length input.

    Raises:
        ValueError: if input_list is empty (the original raised IndexError).
    """
    values = sorted(int(el) for el in input_list)
    if not values:
        raise ValueError("my_median() requires at least one element")
    mid = len(values) // 2
    if len(values) % 2 == 1:
        return values[mid]
    # Even length: average the two middle values.
    return (values[mid - 1] + values[mid]) / 2
In [48]:
# SECURITY: never hardcode API keys in a notebook -- cells and outputs get
# committed and shared. The original cell hardcoded two keys (the first was
# silently overwritten by the second). Read the key from the environment;
# set NYT_API_KEY before running, e.g. `export NYT_API_KEY=...`.
api_key = os.environ.get("NYT_API_KEY", "")
In [49]:
# NYT Article Search endpoint. Build the URL from the api_key variable so the
# credential is not duplicated inside a string literal (the original embedded
# the raw key directly in the URL).
url = (
    "http://api.nytimes.com/svc/search/v2/articlesearch.json"
    "?q=gay+marriage&api-key=" + api_key
)
In [50]:
# Fetch the search results. A timeout keeps the cell from hanging forever on
# a slow/unreachable host, and raise_for_status() surfaces HTTP errors (e.g.
# an invalid API key) here instead of as a confusing KeyError in the next cell.
r = requests.get(url, timeout=30)
r.raise_for_status()
In [51]:
# Collect per-article word counts from the API response.
wc_list = []
for article in r.json()['response']['docs']:
    try:
        wc_list.append(int(article['word_count']))
    except (KeyError, TypeError, ValueError):
        # Some docs lack a usable word_count; skip those rather than
        # swallowing every exception with a bare `except`.
        continue
In [52]:
# Mean word count of the returned articles, via the hand-rolled helper.
my_mean(wc_list)
Out[52]:
In [53]:
import numpy as np
In [54]:
# Cross-check the hand-rolled mean against numpy's implementation.
np.mean(wc_list)
Out[54]:
In [57]:
# BUG FIX: the original wrote `my_median = my_median(wc_list)`, rebinding the
# name `my_median` from the function to its integer result -- re-running this
# cell (or any later call) then fails with "'int' object is not callable" and
# breaks Restart-&-Run-All. Bind the result to a distinct name instead.
median_word_count = my_median(wc_list)
print(median_word_count)
In [58]:
# numpy's median for comparison with the hand-rolled my_median result.
np.median(wc_list)
Out[58]:
In [ ]: