In [3]:
import requests # a better package than urllib2
In [4]:
def my_mean(input_list):
    # Sum the elements and count them by hand, then divide.
    list_sum = 0
    list_count = 0
    for el in input_list:
        list_sum += el
        list_count += 1
    return list_sum / list_count
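A quick sanity check with made-up numbers (not from the notebook run), assuming Python 3's true division:

my_mean([1, 2, 3, 4])   # (1 + 2 + 3 + 4) / 4 == 2.5
my_mean([10])           # a single-element list is its own mean: 10.0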
In [20]:
def my_median(input_list):
    list_length = len(input_list)
    sort_list = sorted(input_list)
    if list_length % 2 == 0:
        # Even length: average the two middle values.
        mid = list_length // 2
        return (sort_list[mid - 1] + sort_list[mid]) / 2
    else:
        # Odd length: take the single middle value.
        return sort_list[(list_length - 1) // 2]
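Again a hypothetical spot check, not from the original run:

my_median([3, 1, 2])      # odd length: the middle of [1, 2, 3] is 2
my_median([4, 1, 3, 2])   # even length: the mean of 2 and 3 is 2.5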
In [6]:
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [7]:
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key=" + api_key
In [8]:
r = requests.get(url)
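Before poking at the JSON it's worth confirming the request actually succeeded; a minimal check, not part of the original cells:

r.raise_for_status()   # raises requests.HTTPError on a 4xx/5xx response
r.status_code          # 200 means the API accepted the key and query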
In [9]:
len(r.json()['response']['docs'])
Out[9]:
In [10]:
wc_list = []
for article in r.json()['response']['docs']:
    # Some articles come back with a null word_count; skip those.
    if article['word_count'] is not None:
        wc_list.append(int(article['word_count']))
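The same filter-and-convert step can be written as a list comprehension; an equivalent alternative sketch, not the notebook's own code:

wc_list = [int(a['word_count'])
           for a in r.json()['response']['docs']
           if a['word_count'] is not None]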
In [11]:
wc_list
Out[11]:
In [17]:
sorted(wc_list)
Out[17]:
In [12]:
my_mean(wc_list)
Out[12]:
In [13]:
import numpy as np
In [14]:
np.mean(wc_list)
Out[14]:
In [22]:
my_median(wc_list)
Out[22]:
In [21]:
np.median(wc_list)
Out[21]:
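Since my_median now averages the two middle values on even-length lists (matching NumPy's convention), the hand-rolled and library results should agree; a consistency check on the same data, allowing for float rounding:

assert abs(my_mean(wc_list) - np.mean(wc_list)) < 1e-9
assert abs(my_median(wc_list) - np.median(wc_list)) < 1e-9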