In [1]:
import requests # a better package than urllib2
In [192]:
def my_mean(input_list):
    """Arithmetic mean of the non-None elements of input_list.

    Each surviving element is coerced with int() before summing, so
    numeric strings (e.g. "120") are accepted alongside ints.

    Raises ZeroDivisionError if the list is empty or all-None
    (same behavior as the original accumulator-loop version).
    """
    # Filter + coerce in one pass; sum()/len() replaces the manual
    # accumulator loop and the commented-out debug prints.
    values = [int(el) for el in input_list if el is not None]
    return sum(values) / len(values)
In [227]:
def my_median(input_list):
    """Median of input_list, matching numpy.median semantics.

    Sorts a copy of the input.  For odd-length input returns the middle
    element; for even-length input returns the mean of the two middle
    elements (a float).

    Bug fixed: the original `input_list[round(list_length/2)]` was wrong
    on two counts — round() uses banker's rounding, so a length-3 list
    gives round(1.5) == 2 and picks the *last* element instead of the
    middle one, and even-length input never averaged the two central
    values (unlike np.median, which this notebook compares against).

    Raises IndexError on an empty list (as before).
    """
    ordered = sorted(input_list)
    n = len(ordered)
    mid = n // 2
    if n % 2:
        # Odd count: single middle element.
        return ordered[mid]
    # Even count: average the two central elements.
    return (ordered[mid - 1] + ordered[mid]) / 2
In [228]:
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [229]:
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key=%s" % api_key
In [230]:
print(url)
In [231]:
r = requests.get(url)
In [232]:
data = r.json()
In [233]:
# data
In [234]:
wc_list = []
for article in r.json()['response']['docs']:
# print(article)
wc_list.append(article['word_count'])
In [235]:
In [235]:
wc_list
Out[235]:
In [236]:
# Drop None entries and coerce the remaining counts to int.
# NOTE(review): overwrites wc_list in place — re-running this cell after the
# list is already clean is harmless, but the raw values are lost.
wc_list = [int(i) for i in wc_list if i is not None]
In [237]:
wc_list
Out[237]:
In [238]:
# Hand-rolled mean, compared against numpy's below.
my_mean(wc_list)
Out[238]:
In [239]:
import numpy as np
In [240]:
np.mean(wc_list)
Out[240]:
In [241]:
# Hand-rolled median vs. numpy's (np.median averages the two middle values
# for even-length input).
my_median(wc_list)
Out[241]:
In [242]:
np.median(wc_list)
Out[242]:
In [243]:
# Sorted view to eyeball where the median falls.
sorted(wc_list)
Out[243]: