In [107]:
import requests # a better package than urllib2
In [108]:
def my_mean(input_list):
    # Accumulate the sum and the element count in one pass,
    # then divide to get the arithmetic mean.
    list_sum = 0
    list_count = 0
    for el in input_list:
        list_sum += el
        list_count += 1
    return list_sum / list_count
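A quick sanity check on a small, hand-computed list; this cell is an addition to the original notebook:
In [ ]:
my_mean([2, 4, 6, 8])  # (2 + 4 + 6 + 8) / 4 == 5.0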
In [109]:
def my_median(input_list):
    # Sort a copy of the list, then take the middle element,
    # or average the two middle elements when the length is even.
    sorted_input = sorted(input_list)
    if len(input_list) % 2 != 0:
        median_value_index = (len(input_list) - 1) / 2
        return sorted_input[int(median_value_index)]
    else:
        above_median_value_index = len(input_list) / 2
        below_median_value_index = above_median_value_index - 1
        return (sorted_input[int(above_median_value_index)] +
                sorted_input[int(below_median_value_index)]) / 2
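As with my_mean, a hedged sanity check against hand-computed medians (this cell is not in the original notebook):
In [ ]:
my_median([5, 1, 3])     # odd length: middle of [1, 3, 5] is 3
my_median([7, 1, 5, 3])  # even length: (3 + 5) / 2 == 4.0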
In [110]:
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"
In [111]:
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json?q=gay+marriage&api-key={}".format(api_key)
In [112]:
r = requests.get(url)
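Before parsing the JSON it is worth confirming the request succeeded; raise_for_status() and status_code are standard parts of the requests API, but this check is an addition to the original notebook:
In [ ]:
r.raise_for_status()  # raises requests.HTTPError on a 4xx/5xx response
r.status_code         # 200 on success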
In [113]:
wc_list = []
for article in r.json()['response']['docs']:
    # Skip articles with no word count, then store the count as an int.
    if article['word_count'] is not None:
        wc_list.append(int(article['word_count']))
wc_list
Out[113]:
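The same filtering can also be written as a list comprehension; an equivalent sketch, not the notebook's own code:
In [ ]:
wc_list = [int(article['word_count'])
           for article in r.json()['response']['docs']
           if article['word_count'] is not None]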
In [114]:
my_mean(wc_list)
Out[114]:
In [115]:
import numpy as np
In [116]:
np.mean(wc_list)
Out[116]:
In [117]:
my_median(wc_list)
Out[117]:
In [118]:
np.median(wc_list)
Out[118]:
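As a final cross-check, the standard library's statistics module (Python 3.4+) should agree with both the hand-rolled functions and NumPy; this cell is an addition:
In [ ]:
import statistics
statistics.mean(wc_list), statistics.median(wc_list)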
In [ ]: