In [ ]:
import collections
import functools
from imposm.parser import OSMParser
import json
from matplotlib import collections as mc
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from numpy import nan
import numpy as np
import pandas as pd
import pyproj
import requests
import scipy as sp
import rtree
# import seaborn as sb
from scipy import signal
# import shapely
import shapely.geometry
%pylab inline

import data_munging

Ride Report Method

Here, we use the match method of the OSRM API, with the code modified to return only the endpoints of segments. This lets us aggregate over OSM segments, since node IDs are uniquely associated with a lat/lon pair given sufficient precision in the returned coordinates. The API documentation recommends not sending every single point to the match method, but I'm passing them all regardless because it's easier to code. Down-sampling a ride might actually help to smooth it (or perhaps not, if we accidentally keep a jagged stretch), as sketched below.
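
As a rough sketch of the down-sampling idea (the step size of 5 and the helper name are arbitrary assumptions, not tuned or used elsewhere), something like this could thin a ride before building the match request:


In [ ]:
def downsample_readings(ride_readings, step=5):
    # Keep every `step`-th reading, plus the final one so the end of the
    # ride is not dropped; the thinned frame then feeds the match request.
    thinned = ride_readings.iloc[::step]
    if ride_readings.index[-1] not in thinned.index:
        thinned = pd.concat([thinned, ride_readings.iloc[[-1]]])
    return thinned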

Currently, I am unsure how to mark up OSM data with bumpiness information, as we have data that look like this in the raw OSM file:

<way id="23642309" version="25" timestamp="2013-12-26T23:03:24Z" changeset="19653154" uid="28775" user="StellanL">
  <nd ref="258965973"/> <nd ref="258023463"/> <nd ref="736948618"/> <nd ref="258023391"/>
  <nd ref="736948622"/> <nd ref="930330659"/> <nd ref="736861978"/> <nd ref="930330542"/>
  <nd ref="930330544"/> <nd ref="929808660"/> <nd ref="736934948"/> <nd ref="930330644"/>
  <nd ref="736871567"/> <nd ref="619628331"/> <nd ref="740363293"/> <nd ref="931468900"/>
  <tag k="name" v="North Wabash Avenue"/>
  <tag k="highway" v="tertiary"/>
  <tag k="loc_ref" v="44 E"/>
</way>

My tentative idea is to match the snapped lat/lons to OSM node IDs using imposm, then find the corresponding nd refs in the original data and add a property that contains bumpiness information.
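
A minimal sketch of that idea, with several stated assumptions: chicago.osm.pbf is a placeholder filename, rounding to six decimal places is a guess at "sufficient precision", and imposm.parser's coords_callback is assumed to receive batches of (osmid, lon, lat) tuples.


In [ ]:
node_coords = {}    # OSM node id -> (lat, lon)
coord_to_node = {}  # rounded (lat, lon) -> OSM node id

def handle_coords(coords):
    # Build lookups in both directions so a snapped lat/lon from the
    # match response can be traced back to an nd ref.
    for osmid, lon, lat in coords:
        node_coords[osmid] = (lat, lon)
        coord_to_node[(round(lat, 6), round(lon, 6))] = osmid

osm_parser = OSMParser(concurrency=4, coords_callback=handle_coords)
osm_parser.parse(data_munging.data_dir + 'chicago.osm.pbf')  # placeholder path

With coord_to_node in hand, each snapped point could be rounded, looked up to recover its node ID, and the bumpiness attached there.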


In [ ]:
rides, readings = data_munging.read_raw_data()
readings = data_munging.clean_readings(readings)
readings = data_munging.add_proj_to_readings(readings, data_munging.NAD83)

If you are using a Dockerized OSRM instance, you can find its IP address by inspecting the running container (or by linking another container to it and pinging it). Usually, though, the URL below will already be correct, since it is the Docker default.


In [ ]:
digital_ocean_url = 'http://162.243.23.60/osrm-chi-vanilla/'
local_docker_url = 'http://172.17.0.2:5000/'
url = local_docker_url
nearest_request = url + 'nearest?loc={0},{1}'
match_request = url + 'match?loc={0},{1}&t={2}&loc={3},{4}&t={5}'
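
If you are unsure whether the chosen url is reachable, a quick sanity check (the downtown-Chicago coordinate below is arbitrary) is to hit the nearest endpoint before firing off full match requests:


In [ ]:
# A connection error here means the url (or the Docker IP) is wrong; a
# successful response should include a 'mapped_coordinate' entry.
test_ping = requests.get(nearest_request.format(41.88, -87.63))
print(test_ping.status_code)
print(test_ping.json())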

In [ ]:
def readings_to_match_str(readings):
    # Build an OSRM match request from a set of readings, using a simple
    # incrementing counter as the timestamp since OSRM only accepts integer times.
    data_str = '&loc={0},{1}&t={2}'
    output_str = ''
    elapsed_time = 0
    for i, reading in readings.iterrows():
        elapsed_time += 1
        new_str = data_str.format(str(reading['start_lat']), str(reading['start_lon']), str(elapsed_time))
        output_str += new_str
    # Strip the leading '&' so the query string starts with 'loc='.
    return url + 'match?' + output_str[1:]

This is a small example of how the matching should work, kept around for troubleshooting and sanity checks.


In [ ]:
test_request = readings_to_match_str(readings.loc[readings['ride_id'] == 128,  :])
print(test_request)

In [ ]:
matched_ride = requests.get(test_request).json()

In [ ]:
snapped_points = pd.DataFrame(matched_ride['matchings'][0]['matched_points'], columns=['lat', 'lon'])

In [ ]:
ax = snapped_points.plot(x='lon', y='lat', kind='scatter')
readings.loc[readings['ride_id'] == 128,  :].plot(x='start_lon', y='start_lat', kind='scatter', ax=ax)
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
plt.show()

In [ ]:
a_reading = readings.loc[0, :]
test_match_request = match_request.format(a_reading['start_lat'],
                                      a_reading['start_lon'], 
                                      0,
                                      a_reading['end_lat'],
                                      a_reading['end_lon'],
                                      1)
# This does not work because OSRM does not accept floats as times. 
# test_map_request = map_request.format(*tuple(a_reading[['start_lat', 'start_lon', 'start_time',
#                                                 'end_lat', 'end_lon', 'end_time']]))
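
If we ever wanted to send real timestamps instead of the synthetic counter, one hedged workaround (assuming start_time and end_time are numeric Unix timestamps, which I have not verified; the helper and the _ts variable are hypothetical) is to round them to integers first:


In [ ]:
def to_osrm_time(t):
    # OSRM rejects float times, so coerce to an integer number of seconds.
    return int(round(float(t)))

test_match_request_ts = match_request.format(a_reading['start_lat'],
                                             a_reading['start_lon'],
                                             to_osrm_time(a_reading['start_time']),
                                             a_reading['end_lat'],
                                             a_reading['end_lon'],
                                             to_osrm_time(a_reading['end_time']))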

In [ ]:
test_nearest_request = nearest_request.format(a_reading['start_lat'], a_reading['start_lon'])

In [ ]:
osrm_response = requests.get(test_match_request).json()
osrm_response['matchings'][0]['matched_points']

In [ ]:
osrm_response = requests.get(test_nearest_request).json()
osrm_response['mapped_coordinate']

In [ ]:
readings['snapped_lat'] = 0
readings['snapped_lon'] = 0

In [ ]:
chi_readings = data_munging.filter_readings_to_chicago(readings)
chi_rides = list(set(chi_readings.ride_id))

In [ ]:
# This is a small list of rides that I think are bad based upon their graphs.
# I currently do not have an automatic way to update this.
bad_rides = [128, 129, 5.0, 7.0, 131, 133, 34, 169]
good_chi_rides = [i for i in chi_rides if i not in bad_rides]

In [ ]:
for ride_id in chi_rides:
    if ride_id in bad_rides:
        print(ride_id)
        try:
            print('num readings: ' + str(sum(readings['ride_id'] == ride_id)))
        except:
            print('we had some issues here.')

In [ ]:
all_snapped_points = []
readings['snapped_lat'] = np.NaN
readings['snapped_lon'] = np.NaN
for ride_id in chi_rides:
    if pd.notnull(ride_id):
        ax = readings.loc[readings['ride_id'] == ride_id, :].plot(x='start_lon', y='start_lat')
        try:
            matched_ride = requests.get(readings_to_match_str(readings.loc[readings['ride_id'] == ride_id,  :])).json() 
            readings.loc[readings['ride_id'] == ride_id, ['snapped_lat', 'snapped_lon']] = matched_ride['matchings'][0]['matched_points']
            readings.loc[readings['ride_id'] == ride_id, :].plot(x='snapped_lon', y='snapped_lat', ax=ax)
        except:
            print('could not snap')
        plt.title('Plotting Ride ' + str(ride_id))
        fig = plt.gcf()
        fig.set_size_inches(18.5, 10.5)
        plt.show()

In [ ]:
ax = readings.loc[readings['ride_id'] == 2, :].plot(x='snapped_lon', y='snapped_lat', style='r-')
for ride_id in good_chi_rides:
    print(ride_id)
    try:
#         readings.loc[readings['ride_id'] == ride_id, :].plot(x='start_lon', y='start_lat', ax=ax)
        readings.loc[readings['ride_id'] == ride_id, :].plot(x='snapped_lon', y='snapped_lat', ax=ax, style='b-')
    except:
        print('bad')
ax = readings.loc[readings['ride_id'] == 2, :].plot(x='snapped_lon', y='snapped_lat', style='r-', ax=ax)
fig = plt.gcf()
fig.set_size_inches(36, 36)
plt.show()

In [ ]:
# This code goes through a ride backwards in order to figure out what two endpoints 
# the bicycle was going between.
readings['next_snapped_lat'] = np.NaN
readings['next_snapped_lon'] = np.NaN
for ride_id in chi_rides:
    next_lat_lon = (np.NaN, np.NaN)
    for index, row in reversed(list(readings.loc[readings['ride_id'] == ride_id, :].iterrows())):
        readings.loc[index, ['next_snapped_lat', 'next_snapped_lon']] = next_lat_lon
        if (row['snapped_lat'], row['snapped_lon']) != next_lat_lon:
            next_lat_lon = (row['snapped_lat'], row['snapped_lon'])

In [ ]:
clean_chi_readings = readings.loc[[ride_id in chi_rides for ride_id in readings['ride_id']], :]

In [ ]:
clean_chi_readings.to_csv(data_munging.data_dir + 'clean_chi_readings.csv')

In [ ]:
clean_chi_readings = pd.read_csv(data_munging.data_dir + 'clean_chi_readings.csv')

In [ ]:
road_bumpiness = collections.defaultdict(list)
for index, reading in clean_chi_readings.iterrows():
    if reading['gps_mph'] < 30 and reading['gps_mph'] > 3:
        osm_segment = [(reading['snapped_lat'], reading['snapped_lon']),
                      (reading['next_snapped_lat'], reading['next_snapped_lon'])]
        osm_segment = sorted(osm_segment)
        # NaN != NaN is always True, so check for missing coordinates with pd.isnull
        # rather than comparing against (np.NaN, np.NaN).
        if not any(pd.isnull(coord) for lat_lon in osm_segment for coord in lat_lon):
            road_bumpiness[tuple(osm_segment)].append(reading['abs_mean_over_speed'])

In [ ]:
# sorted_road_bumpiness = sorted(road_bumpiness.items(), key=lambda i: len(i[1]), reverse=True)

In [ ]:
total_road_readings = dict((osm_segment, len(road_bumpiness[osm_segment])) for osm_segment in road_bumpiness)

In [ ]:
agg_road_bumpiness = dict((osm_segment, np.mean(road_bumpiness[osm_segment])) for osm_segment in road_bumpiness)

In [ ]:
agg_path = data_munging.data_dir + 'agg_road_bumpiness.txt'

This section functions as a shortcut: if you just want to load the aggregated bumpiness from disk, start here instead of recalculating all of it.


In [ ]:
with open(agg_path, 'w') as f:
    f.write(str(agg_road_bumpiness))

In [ ]:
with open(agg_path, 'r') as f:
    agg_road_bumpiness = f.read()

In [ ]:
# eval works here because `from numpy import nan` at the top of the notebook
# puts `nan` in scope for any NaN values that str() wrote to the file.
agg_road_bumpiness = eval(agg_road_bumpiness)
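
The write/read cells above can also be folded into one guarded cell; a small sketch, reusing agg_path and road_bumpiness from earlier in the notebook:


In [ ]:
import os

# Load the cached aggregate if it exists, otherwise recompute it from the raw lists.
if os.path.exists(agg_path):
    with open(agg_path, 'r') as f:
        agg_road_bumpiness = eval(f.read())
else:
    agg_road_bumpiness = dict((osm_segment, np.mean(road_bumpiness[osm_segment]))
                              for osm_segment in road_bumpiness)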

In [ ]:
def osm_segment_is_null(osm_segment):
    return (pd.isnull(osm_segment[0][0])
            or pd.isnull(osm_segment[0][1])
            or pd.isnull(osm_segment[1][0])
            or pd.isnull(osm_segment[1][1]))

In [ ]:
agg_road_bumpiness = dict((osm_segment, agg_road_bumpiness[osm_segment]) for osm_segment in agg_road_bumpiness if not osm_segment_is_null(osm_segment))

In [ ]:
# This is where we filter out all osm segments that are too long

In [ ]:
def find_seg_dist(lat_lon):
    return data_munging.calc_dist(lat_lon[0][1], lat_lon[0][0], lat_lon[1][1], lat_lon[1][0])

In [ ]:
seg_dist = dict()
for lat_lon in agg_road_bumpiness:
    seg_dist[lat_lon] = find_seg_dist(lat_lon)

In [ ]:
with open('../dat/chi_agg_info.csv', 'w') as f:
    f.write('lat_lon_tuple|agg_road_bumpiness|total_road_readings|seg_dist\n')
    for lat_lon in agg_road_bumpiness:
        if seg_dist[lat_lon] < 200:
            f.write(str(lat_lon) + '|' + str(agg_road_bumpiness[lat_lon])
                    + '|'  + str(total_road_readings[lat_lon])
                    + '|' + str(seg_dist[lat_lon]) + '\n')

In [ ]:
seg_dist[lat_lon]

In [ ]:
np.max(agg_road_bumpiness.values())

In [ ]:
plt.hist(agg_road_bumpiness.values())

In [ ]:
import matplotlib.colors as colors

In [ ]:
plasma = plt.get_cmap('plasma')
cNorm  = colors.Normalize(vmin=0, vmax=1.0)
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=plasma)

In [ ]:
for osm_segment, bumpiness in agg_road_bumpiness.items():
#     lat_lon = osm_segment
#     color = (1, 0, 0) if data_munging.calc_dist(lat_lon[0][1], lat_lon[0][0], lat_lon[1][1], lat_lon[1][0]) > 100 else (0, 1, 0)
    plt.plot([osm_segment[0][1], osm_segment[1][1]],
             [osm_segment[0][0], osm_segment[1][0]],
#              color=color)
             color=scalarMap.to_rgba(bumpiness))
fig = plt.gcf()
fig.set_size_inches(24, 48)
plt.show()

In [ ]:
filtered_agg_bumpiness = dict((lat_lon, agg_road_bumpiness[lat_lon])
                               for lat_lon in agg_road_bumpiness if find_seg_dist(lat_lon) < 200)

In [ ]:
with open(data_munging.data_dir + 'filtered_chi_road_bumpiness.txt', 'w') as f:
    f.write(str(filtered_agg_bumpiness))