In [1]:
import requests # a better package than urllib2
In [98]:
def my_mean(input_list):
    """Return the arithmetic mean of the values in input_list.

    Each element is coerced with int(), matching the original behavior
    (the NYT API can return word counts as strings).
    Raises ZeroDivisionError on an empty list, as the original did.
    """
    # Use the built-in sum()/len() instead of a hand-rolled counter loop.
    return sum(int(el) for el in input_list) / len(input_list)
In [109]:
def my_median(input_list):
    """Return the middle element of input_list after sorting.

    Bug fix: the previous version sorted and indexed the module-level
    global ``wc_list`` instead of its own ``input_list`` argument — and
    mutated that global in place via ``.sort()``. It now works on a
    sorted copy of the argument, leaving the caller's list untouched.

    For even-length input this returns the upper of the two middle
    elements (index len // 2), preserving the original selection rule
    rather than averaging the two middles.
    Raises IndexError on an empty list.
    """
    ordered = sorted(input_list)  # sorted() copies; no mutation of the input
    return ordered[len(ordered) // 2]
In [4]:
import os

# Never hardcode credentials: the previous revision embedded a real
# NYT API key in plain text (and it is now committed to history, so it
# should be rotated). Read the key from the environment instead —
# set NYT_API_KEY before running this notebook.
api_key = os.environ.get("NYT_API_KEY", "")
In [6]:
# Article Search endpoint for the query "gay marriage"; the API key is
# appended as the api-key query parameter.
url = (
    "http://api.nytimes.com/svc/search/v2/articlesearch.json"
    "?q=gay+marriage&api-key={}".format(api_key)
)
In [7]:
# Fetch the search results; consider checking r.status_code (or calling
# r.raise_for_status()) before trusting r.json() downstream.
r = requests.get(url)
In [107]:
# Collect the word count of every returned article, skipping docs whose
# word_count field is falsy (missing, empty string, or zero).
wc_list = [
    int(article['word_count'])
    for article in r.json()['response']['docs']
    if article['word_count']
]
In [99]:
my_mean(wc_list)  # mean word count via the hand-rolled helper (compare with np.mean below)
Out[99]:
In [14]:
import numpy as np
In [114]:
np.mean(wc_list)  # sanity check: should agree with my_mean(wc_list)
Out[114]:
In [110]:
my_median(wc_list)  # median word count via the hand-rolled helper (compare with np.median below)
Out[110]:
In [112]:
np.median(wc_list)  # NOTE: np.median averages the two middles for even-length input, so it may differ from my_median
Out[112]:
In [ ]: