In [10]:
import requests # a better package than urllib2
In [50]:
def my_mean(input_list):
    """Return the arithmetic mean of the non-None values in input_list.

    None entries are skipped entirely: they contribute to neither the sum
    nor the count, so missing values do not drag the average toward zero.

    Raises:
        ValueError: if input_list contains no non-None values (the original
            implementation raised a bare ZeroDivisionError in that case).
    """
    list_sum = 0
    list_count = 0
    for el in input_list:
        if el is not None:  # skip missing values so they don't skew the mean
            list_sum += el
            list_count += 1
    if list_count == 0:
        raise ValueError("my_mean() requires at least one non-None value")
    return list_sum / list_count
In [75]:
def my_median(input_list):
    """Return the median of input_list.

    The original version indexed the *unsorted* input (so it only produced
    a true median if the caller had already sorted the list) and, for even
    lengths, arbitrarily returned the upper-middle element.  This version
    sorts a copy (the caller's list is untouched) and averages the two
    middle elements when the length is even.

    Raises:
        ValueError: if input_list is empty.
    """
    if not input_list:
        raise ValueError("my_median() requires a non-empty list")
    ordered = sorted(input_list)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    # Even length: average the two middle values (may return a float).
    return (ordered[mid - 1] + ordered[mid]) / 2
In [76]:
# NYT Article Search API key.  NOTE(review): a real key should never be
# committed in plain text -- rotate it and load from an environment variable.
api_key = "ffaf60d7d82258e112dd4fb2b5e4e2d6:3:72421680"

# Base endpoint only; query and key go through `params` so requests handles
# the URL encoding.  The original hard-coded URL had a stray '%' before the
# key ("api-key=%ffaf...") and duplicated the key instead of using api_key.
url = "http://api.nytimes.com/svc/search/v2/articlesearch.json"

r = requests.get(url, params={"q": "gay marriage", "api-key": api_key})

# Collect integer word counts, skipping articles whose word_count is None
# (those would otherwise crash int() / distort the statistics below).
wc_list = []
for article in r.json()['response']['docs']:
    if article['word_count'] is not None:
        wc_list.append(int(article['word_count']))
In [80]:
# Notebook display cell: show the collected word counts.
wc_list
Out[80]:
In [81]:
# Mean word count via the hand-rolled helper defined above.
my_mean(wc_list)
Out[81]:
In [82]:
import numpy as np
In [83]:
# Cross-check the hand-rolled mean against numpy's implementation.
np.mean(wc_list)
Out[83]:
In [84]:
# Median word count via the hand-rolled helper; compare with np.median below.
my_median(wc_list)
Out[84]:
In [85]:
# numpy's reference median, for comparison against my_median above.
np.median(wc_list)
Out[85]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]: