IPython keyboard shortcuts: http://ipython.org/ipython-doc/stable/interactive/notebook.html#keyboard-shortcuts
In [1]:
from __future__ import division, print_function, unicode_literals
%matplotlib inline
import os
import IPython.display
import numpy as np
import requests
import requests_oauthlib
import oauthlib
import arrow
import json_io
import yaml_io
import utilities
In [2]:
# Load application's Twitter API details.
# Credentials are read from a local YAML file so they are not hardcoded in
# the notebook; keep this file out of version control.
fname_twitter_api = 'twitter_api.yml'
info_twitter = yaml_io.read(fname_twitter_api)
client_id = info_twitter['consumer_key']
client_secret = info_twitter['consumer_secret']
access_token = info_twitter['access_token']
# Twitter API urls.
# NOTE(review): these OAuth-1/token-request endpoints are unused below since
# the session is built directly from a pre-issued bearer token.
# url_api = 'https://api.twitter.com'
# url_request_oauth1_token = url_api + '/oauth/request_token'
# url_request_oauth2_token = url_api + '/oauth2/token'
# url_authorize = url_api + '/oauth/authorize'
# url_access_token = url_api + '/oauth/access_token'
# Backend client.
# Application-only (client-credentials) OAuth-2 client; no user context.
client = oauthlib.oauth2.BackendApplicationClient(client_id)
# Generate a requests.Session object authorized via OAuth-2.
# The existing access token is wrapped as a Bearer token, so no token
# fetch round-trip is needed here.
token = {'access_token': access_token, 'token_type': 'Bearer'}
twitter = requests_oauthlib.OAuth2Session(client, token=token)
In [3]:
# Query Twitter's rate-limit endpoint, restricted to the 'search' resource.
url_status = 'https://api.twitter.com/1.1/application/rate_limit_status.json'
response = twitter.get(url_status, params={'resources': 'search'})
# Drill down to the limits that govern the /search/tweets endpoint.
status = response.json()['resources']['search']['/search/tweets']
# Minutes until the current rate-limit window resets ('reset' is an epoch
# timestamp; arrow handles the conversion and timezone-aware subtraction).
minutes = (arrow.get(status['reset']) - arrow.now()).total_seconds() / 60.
# Display.
print('Rate Limit Status')
print('Limit: {:d}'.format(status['limit']))
print('Remain: {:d}'.format(status['remaining']))
print('Reset: {:.1f} min'.format(minutes))
The following links to Twitter's documentation are the ones I found most useful:
The page Help with the Search API has this helpful tidbit of information for when you expect a large number of returned tweets. In that case it is important to pay attention to how you iterate through the results:
Iterating in a result set: parameters such as count, until, since_id, and max_id allow us to control how we iterate through search results, since it could be a large set of tweets. The 'Working with Timelines' documentation is a very rich and illustrative tutorial for learning how to use these parameters to achieve the best efficiency and reliability when processing result sets.
In [4]:
# Run a small search against the REST search endpoint.
query = 'grey hound dog'
count = 25
url_search = 'https://api.twitter.com/1.1/search/tweets.json'
response = twitter.get(url_search,
                       params={'q': query,
                               'include_entities': True,
                               'count': count})
# The JSON payload splits into query metadata and the list of tweets.
results = response.json()
search_meta = results['search_metadata']
search_tweets = results['statuses']
# Show the metadata and one sample tweet via IPython's rich display.
print('\nSearch metadata')
print('---------------')
IPython.display.display(search_meta)
print('\n\nFirst returned tweet')
print('--------------------')
IPython.display.display(search_tweets[0])
Rate limit information is also contained within the response headers.
In [18]:
# Echo the rate-limit bookkeeping headers sent back with the last response.
for name in ('x-rate-limit-remaining', 'x-rate-limit-limit', 'x-rate-limit-reset'):
    print(name, response.headers[name])
In [ ]: