In [ ]:
import re, requests, StringIO

Load Fluxtream library and authenticate


In [ ]:
def exec_ipynb(url):
    # Fetch a notebook from a URL or a local path and exec all of its code cells
    # in the global namespace.  Assumes the older (pre-nbformat-4) notebook layout:
    # worksheets[0]['cells'], with each code cell's source under 'input'.
    import json, re, urllib2
    nb = (urllib2.urlopen(url) if re.match(r'https?:', url) else open(url)).read()
    exec '\n'.join([''.join(cell['input']) for cell in json.loads(nb)['worksheets'][0]['cells'] if cell['cell_type'] == 'code']) in globals()

exec_ipynb('Fluxtream-Library.ipynb')
fluxtream_login()

Download CSV from Google Spreadsheet


In [ ]:
# dateutil's tz module is used below to construct the timezone object
from dateutil import tz

device_name = 'Smells_1'
spreadsheet_key = '16.............................VI'
timezone = tz.gettz('America/Chicago')

csv_download_url = 'https://docs.google.com/spreadsheet/ccc?key=%s&output=csv' % spreadsheet_key
print 'Downloading CSV from %s...' % csv_download_url
# Google requires cookies to work across 30x redirects, so we use 'requests' library
data = requests.get(csv_download_url).content
print '%d lines downloaded' % len(data.split('\n'))

In [ ]:
# Execute this cell to define the functions for reading in CSV files.

# Keep track of where you save the files, and enter them in the next section.
import httplib, urllib, time, base64, string, datetime, json, csv, calendar, pprint
from dateutil import tz
from dateutil import parser

def epoch_time(dt):
    epoch = datetime.datetime(1970, 1, 1, tzinfo=tz.tzutc())
    return (dt - epoch).total_seconds()    

#------------------------------------------------------------------------------------------------------------------
#
# The timezone argument should be a python timezone object.  These are typically created 
# using some variant of the tz.gettz() function.  Documentation is here:
#   http://niemeyer.net/python-dateutil#head-b79630fbbda87af6d6d121737510fd7ea5aeea97
# Examples include:
#    tz.gettz()  uses the local timezone this computer is currently set to
#    tz.gettz("America/New_York")  uses US Eastern time, with appropriate adjustments for daylight savings time
#    tz.gettz("America/Los_Angeles")  uses US Pacific time, with appropriate adjustments for DST
#    tz.gettz("EST")  uses US Eastern Standard Time, which (I think) would ignore DST and always be UTC-5
# The list of available timezones is at: http://en.wikipedia.org/wiki/List_of_tz_database_time_zones

# To use a fixed hour offset from UTC instead, you can create the timezone argument with:
#    tz.tzoffset(None, gmt_hr_offset * 3600)
# where gmt_hr_offset is the numeric number of hours offset from UTC.
#
# West of Greenwich England gmt_hr_offset values are negative numbers: 
#     EST would be gmt_hr_offset = -5, EDT would be gmt_hr_offset = -4
# East of Greenwich gmt_hr_offset values are positive numbers:
#     Mainland Europe in the summer would be gmt_hr_offset=2, and in the winter would be gmt_hr_offset=1.
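
# As a quick illustration (these variable names are just examples and are not used
# elsewhere in this notebook), the two approaches look like this:
#
#     eastern_dst_aware = tz.gettz('America/New_York')   # follows EST/EDT transitions
#     eastern_fixed     = tz.tzoffset('EST', -5 * 3600)  # always UTC-5, never adjusts for DST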

# Replace runs of characters other than letters, digits, and underscore with a single '_',
# and trim leading/trailing underscores:  e.g. 'CO2 (ppm)' -> 'CO2_ppm'
def sanitize_channel_name(name):
    return re.sub(r'\W+', '_', name).strip('_')

def parse_google_sheet_csv(csv_as_string, timezone):
    csv_reader = csv.reader(StringIO.StringIO(csv_as_string), delimiter=',')

    # First row is header, with names of columns
    header = csv_reader.next()
    
    # First column is assumed to be timestamp;  skip it
    column_names = [sanitize_channel_name(channel) for channel in header[1:]]
    
    # Data, in Fluxtream format
    parsed_data = []
    
    # Read the data
    for row in csv_reader:
        datetime_str = row.pop(0)
        if datetime_str != '':
            # Google Docs's time format is month/day/year with 24-hour time, in local time.  See
            #   http://www.tutorialspoint.com/python/time_strptime.htm
            # for details of how the parsing for that works in python
            local_ts = datetime.datetime.strptime(datetime_str, '%m/%d/%Y %H:%M:%S')
            local_ts = local_ts.replace(tzinfo=timezone)
            parsed_row = [epoch_time(local_ts)]

            # Iterate over the columns.  Empty cells become None, numeric cells
            # (including decimals) become floats, and anything else is kept as a string.
            for sample in row:
                if sample == '':
                    parsed_sample = None
                else:
                    try:
                        parsed_sample = float(sample)
                    except ValueError:
                        parsed_sample = sample
                parsed_row.append(parsed_sample)
            parsed_data.append(parsed_row)
        
    return (column_names, parsed_data)
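
# Illustrative shape of the parsed output (these values are made up, not from a real sheet):
#   column_names == ['Kitchen_ppm', 'Bathroom_ppm']
#   records      == [[1397240100.0, 412.0, None],
#                    [1397240160.0, 415.0, 'calibrating']]
# Each record begins with a UTC epoch timestamp, followed by one entry per channel.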

(column_names, records) = parse_google_sheet_csv(data, timezone)
fluxtream_upload(device_name, column_names, records)