In [1]:
# DRCOG land-use model notebook: manual zone-level demand refinement.
# NOTE(review): written for Python 2 (bare print statements below) and depends
# on the synthicity/drcog stack plus the drcog.h5 data store.
import numpy as np, pandas as pd, os
from synthicity.utils import misc
from drcog.models import elcm_simulation, hlcm_simulation, regression_model_simulation, dataset
# Master HDF5 dataset wrapper (buildings, households, establishments, parcels, zones).
dset = dataset.DRCOGDataset(os.path.join(misc.data_dir(),'drcog.h5'))
np.random.seed(1)  # fix the seed so the random relocations below are reproducible
#Variable Library
from drcog.variables import variable_library
variable_library.calculate_variables(dset)  # compute derived columns on the dataset tables
b = dset.buildings
e = dset.establishments
hh = dset.households
# Per-zone household/employment shift targets consumed by the refinement loop below.
zone_refine = pd.read_csv(os.path.join(misc.data_dir(),'zone_demand_refine.csv'))
In [2]:
p = dset.parcels
In [3]:
# How many parcels exist in the zones of interest (checking for zones with no parcels).
p[np.in1d(p.zone_id,[1822,1853,1857,1845,1860,1846,1858,1848,1851])].groupby('zone_id').size()
Out[3]:
In [7]:
# Does zone 1853 have any parcels at all?
1853 in p.zone_id.values
Out[7]:
In [14]:
# Next free building id for the placeholder building created below.
new_building_id = dset.buildings.index.values.max() + 1
In [11]:
# Fabricate one small non-residential building on a parcel in zone 1853 so the
# relocation helpers have somewhere to place agents in that zone.
pid = p.index.values[p.zone_id==1853][0]
newbuildings = pd.DataFrame({'building_type_id':[4],'improvement_value':[10000],'land_area':[200],'non_residential_sqft':[500],
'parcel_id':[pid],'residential_units':[0],'sqft_per_unit':[0],'stories':[0],'tax_exempt':[0],'year_built':[2000],'bldg_sq_ft':[500],
'unit_price_non_residential':[2.0],'unit_price_residential':[0.0], 'building_sqft_per_job':[250.0],
'non_residential_units':[2],'base_year_jobs':[0.0],'all_units':[2]})
In [15]:
newbuildings.index = np.array([new_building_id])
In [18]:
newbuildings.head()
Out[18]:
In [20]:
# Sanity checks before committing the concat: max index, column and row counts.
pd.concat([dset.buildings,newbuildings]).index.values.max()
Out[20]:
In [23]:
len(pd.concat([dset.buildings,newbuildings]).columns)
Out[23]:
In [24]:
len(dset.buildings.columns)
Out[24]:
In [25]:
len(dset.buildings)
Out[25]:
In [26]:
# Commit: append the placeholder building to the stored buildings table.
dset.d['buildings'] = pd.concat([dset.buildings,newbuildings])
dset.buildings.index.name = 'building_id'
In [27]:
len(dset.buildings)
Out[27]:
In [37]:
z = dset.zones
In [35]:
# Zone 1822 apparently has no parcels; a placeholder parcel is fabricated below.
1822 in p.zone_id.values
Out[35]:
In [40]:
zone_id = 1822
In [51]:
# Pull county name and centroid coordinates for the target zone.
county = z.county.values[z.index.values==zone_id][0]
x = z.zonecentroid_x.values[z.index.values==zone_id][0]
y = z.zonecentroid_y.values[z.index.values==zone_id][0]
In [53]:
y
Out[53]:
In [45]:
# Map DRCOG county names to Colorado FIPS county codes.
# Replaces the original 11-branch if/elif chain; a dict lookup is shorter and
# fails loudly (KeyError) on an unknown county, whereas the chain silently
# left `county_id` undefined until a later NameError.
COUNTY_FIPS = {
    'Denver': 8031,
    'Adams': 8001,
    'Arapahoe': 8005,
    'Boulder': 8013,
    'Broomfield': 8014,
    'Clear Creek': 8019,
    'Douglas': 8035,
    'Elbert': 8039,
    'Gilpin': 8047,
    'Jefferson': 8059,
    'Weld': 8123,
}
county_id = COUNTY_FIPS[county]
In [49]:
# Next free parcel id for the fabricated parcel.
pid = p.index.values.max()+1
In [54]:
# One-acre placeholder parcel at the zone centroid (43,560 sqft = 1 acre).
newparcel = pd.DataFrame({'county_id':[county_id],'parcel_sqft':[43560],'land_value':[0],'zone_id':[zone_id],
'centroid_x':[x],'centroid_y':[y],'dist_bus':[6000],'dist_rail':[6000],'in_ugb':[1],'in_uga':[0],
'prop_constrained':[0.0],'acres':[1.0] })
In [56]:
newparcel.index = np.array([pid])
In [59]:
len(p)
Out[59]:
In [58]:
# Commit: append the placeholder parcel to the stored parcels table.
dset.d['parcels'] = pd.concat([dset.parcels,newparcel])
In [61]:
dset.parcels.index.name = 'parcel_id'
In [28]:
# NOTE(review): `count` is undefined -- this cell raises NameError on a fresh
# kernel (probably a truncated variable name left over from editing).
count
Out[28]:
In [84]:
# NOTE(review): out-of-order execution -- `zone_index` is only defined below
# (cell In [79]); these cells fail under Restart-and-Run-All.
b.groupby('zone_id').residential_units.sum()+zone_index
Out[84]:
In [85]:
# Residential units per zone, aligned onto all zones (zones with no buildings -> NaN).
base_ru_zone = b.groupby('zone_id').residential_units.sum()+zone_index
In [87]:
base_ru_zone = base_ru_zone.fillna(0)
In [88]:
base_ru_zone
Out[88]:
In [65]:
z.index.values
Out[65]:
In [68]:
base_ru_zone
Out[68]:
In [69]:
# NOTE(review): positional reassignment -- assumes base_ru_zone rows are already
# ordered exactly like the zones table; verify before relying on this.
base_ru_zone.index = z.index.values
In [71]:
z.index.name
Out[71]:
In [79]:
# Zero-filled Series over every zone, used to force per-zone sums onto all zones.
z_index = dset.zones.index
zone_index = pd.Series(index=z_index).fillna(0)
In [80]:
zone_index
Out[80]:
In [82]:
base_ru_zone
Out[82]:
In [81]:
base_ru_zone+zone_index
Out[81]:
In [3]:
# Building counts in the zones of interest.
b[np.in1d(b.zone_id,[1822,1853,1857,1845,1860,1846,1858,1848,1851])].groupby('zone_id').size()
Out[3]:
In [26]:
# Baseline household/establishment counts in two spot-check zones (Python 2 prints).
print (hh.zone_id==1656).sum()
print (hh.zone_id==1874).sum()
print (e.zone_id==1656).sum()
print (e.zone_id==1874).sum()
In [2]:
def relocate_agents(agents_joined, zone_id, number_of_agents, buildings=None):
    """Pull ``number_of_agents`` agents from outside ``zone_id`` into it.

    Randomly samples agents whose current zone differs from ``zone_id`` and
    assigns them the id of the first building found in the target zone.
    ``agents_joined`` is mutated in place.

    Parameters
    ----------
    agents_joined : DataFrame with 'zone_id' and 'building_id' columns.
    zone_id : target zone id.
    number_of_agents : number of agents to move into the zone.
    buildings : optional buildings DataFrame (indexed by building_id, with a
        'zone_id' column). Defaults to the notebook-global ``b`` for backward
        compatibility.
    """
    if buildings is None:
        buildings = b  # notebook-global buildings table
    agent_pool = agents_joined[agents_joined.zone_id != zone_id]
    shuffled_ids = agent_pool.index.values.copy()  # copy: don't shuffle the pool's buffer
    np.random.shuffle(shuffled_ids)
    agents_to_relocate = shuffled_ids[:number_of_agents]
    idx_agents_to_relocate = np.in1d(agents_joined.index.values, agents_to_relocate)
    try:
        # Destination: first building in the target zone.
        new_building_id = buildings[buildings.zone_id == zone_id].index.values[0]
        # .loc write avoids the chained-assignment (SettingWithCopy) pitfall.
        agents_joined.loc[idx_agents_to_relocate, 'building_id'] = new_building_id
    except IndexError:
        # Narrowed from a bare except: only the "no buildings in zone" case is expected.
        print('No buildings in specified zone. Cannot place agents.')
def unplace_agents(agents_joined, zone_id, number_of_agents):
    """Unplace agents (set building_id = -1) in ``zone_id``.

    ``number_of_agents`` is the (negative) annual shift; its sign is flipped
    internally. ``agents_joined`` is mutated in place.
    """
    number_of_agents = -number_of_agents  # flip the sign: callers pass a negative shift
    agent_pool = agents_joined[agents_joined.zone_id == zone_id]  # equality: sample *inside* the zone
    # Guard mirrors the original: only act when the zone has strictly more
    # agents than we need to remove (an exact match is left untouched).
    if len(agent_pool) > number_of_agents:
        shuffled_ids = agent_pool.index.values.copy()
        np.random.shuffle(shuffled_ids)
        agents_to_relocate = shuffled_ids[:number_of_agents]
        idx_agents_to_relocate = np.in1d(agents_joined.index.values, agents_to_relocate)
        # .loc write avoids the chained-assignment (SettingWithCopy) pitfall.
        agents_joined.loc[idx_agents_to_relocate, 'building_id'] = -1  # -1 marks unplaced
In [3]:
def relocate_estabs(agents_joined, zone_id, number_of_agents, buildings=None):
    """Pull establishments totalling roughly ``number_of_agents`` employees into ``zone_id``.

    Samples establishments from outside the zone in random order until their
    cumulative employee count reaches the target (with ~10 employees of
    head-room), then assigns them the first building in the target zone.
    ``agents_joined`` is mutated in place.

    Parameters
    ----------
    agents_joined : establishments DataFrame with 'zone_id', 'employees',
        'building_id' columns.
    zone_id : target zone id.
    number_of_agents : employee count to move in (positive).
    buildings : optional buildings DataFrame; defaults to notebook-global ``b``.
    """
    if buildings is None:
        buildings = b  # notebook-global buildings table
    agent_pool = agents_joined[(agents_joined.zone_id != zone_id)]
    e_sample = agent_pool.reindex(np.random.permutation(agent_pool.index))
    # NOTE: kept as abs(number_of_agents+10) to preserve the original behavior;
    # for the positive shifts this is called with it equals abs(n) + 10.
    e_to_move = e_sample[np.cumsum(e_sample['employees'].values) < abs(number_of_agents + 10)]
    # The original re-shuffled e_to_move's ids, but only set membership is used
    # below, so the extra shuffle was a no-op and is dropped.
    idx_agents_to_relocate = np.in1d(agents_joined.index.values, e_to_move.index.values)
    try:
        new_building_id = buildings[buildings.zone_id == zone_id].index.values[0]
        # .loc write avoids the chained-assignment (SettingWithCopy) pitfall.
        agents_joined.loc[idx_agents_to_relocate, 'building_id'] = new_building_id
    except IndexError:
        # Narrowed from a bare except: only the "no buildings in zone" case is expected.
        print('No buildings in specified zone. Cannot place agents.')
In [4]:
def unplace_estabs(agents_joined, zone_id, number_of_agents):
    """Unplace establishments totalling roughly ``number_of_agents`` employees from ``zone_id``.

    ``number_of_agents`` is the (negative) annual shift; its sign is flipped
    internally. Establishments inside the zone are visited in random order and
    unplaced (building_id = -1) until the cumulative employee count reaches the
    target. ``agents_joined`` is mutated in place.
    """
    number_of_agents = -number_of_agents  # flip the sign: callers pass a negative shift
    agent_pool = agents_joined[agents_joined.zone_id == zone_id]  # equality: sample *inside* the zone
    # Only act when the zone holds strictly more employees than we remove.
    if agent_pool.employees.sum() > number_of_agents:
        e_sample = agent_pool.reindex(np.random.permutation(agent_pool.index))
        e_to_move = e_sample[np.cumsum(e_sample['employees'].values) < abs(number_of_agents)]
        # Original re-shuffled these ids; only set membership is used, so the
        # extra shuffle was a no-op and is dropped.
        idx_agents_to_relocate = np.in1d(agents_joined.index.values, e_to_move.index.values)
        # .loc write avoids the chained-assignment (SettingWithCopy) pitfall.
        agents_joined.loc[idx_agents_to_relocate, 'building_id'] = -1  # -1 marks unplaced
In [5]:
current_year = 2011
# Apply the zonal demand refinements: positive shifts pull agents/establishments
# into the zone; negative shifts (only before 2040) unplace them from it.
for zone in zone_refine.zone_id.values:
    idx_zone = (zone_refine.zone_id == zone)
    hh_shift = zone_refine.annual_hh_shift[idx_zone].values[0]
    emp_shift = zone_refine.annual_emp_shift[idx_zone].values[0]
    if hh_shift > 0:
        relocate_agents(hh, zone, hh_shift)
    if emp_shift > 0:
        relocate_estabs(e, zone, emp_shift)
    if current_year < 2040:
        if hh_shift < 0:
            unplace_agents(hh, zone, hh_shift)
        if emp_shift < 0:
            # BUG FIX: establishments must be unplaced by *employee* count via
            # unplace_estabs; the original called unplace_agents here, which
            # treats each establishment as a single agent and removes far too
            # few employees (asymmetric with the emp_shift > 0 branch above).
            unplace_estabs(e, zone, emp_shift)
In [6]:
# Recompute derived variables and re-check the spot-check zone counts after the
# refinement loop above (Python 2 prints).
variable_library.calculate_variables(dset)
hh = dset.households
e = dset.establishments
print (hh.zone_id==1656).sum()
print (hh.zone_id==1874).sum()
print (e.zone_id==1656).sum()
print (e.zone_id==1874).sum()
In [8]:
zone_refine.head(50)
Out[8]:
In [45]:
# Scratch check of the cumulative-employees sampling rule used in the helpers.
e_sample = e.reindex(np.random.permutation(e.index))
e_sample.index.name = 'establishment_id'
In [47]:
num = 200
e_to_move = e_sample[np.cumsum(e_sample['employees'].values)<abs(num)]
In [49]:
# Should be just under `num` employees.
e_to_move.employees.sum()
Out[49]:
Out[49]:
In [27]:
# NOTE(review): dead leftover cell from a different model version -- `hbp`,
# `household_set`, `jbp` and `job_set` are undefined in this notebook, and the
# 4-argument calls do not match the 3-argument helpers defined above. Running
# this cell raises NameError; it should be deleted or rewritten against the
# current relocate/unplace helpers.
for zone in zone_refine.zone_id.values:
idx_zone = (zone_refine.zone_id==zone)
hh_shift = zone_refine.annual_hh_shift[idx_zone].values[0]
emp_shift = zone_refine.annual_emp_shift[idx_zone].values[0]
if hh_shift > 0:
relocate_agents(hbp,household_set,zone,hh_shift)
if emp_shift > 0:
relocate_agents(jbp,job_set,zone,emp_shift)
if current_year < 2040:
if hh_shift < 0:
unplace_agents(hbp,household_set,zone,hh_shift)
if emp_shift < 0:
unplace_agents(jbp,job_set,zone,emp_shift)
In [60]:
# Employees and household counts per zone after refinement.
e.groupby('zone_id').employees.sum()
Out[60]:
In [62]:
hh.groupby('zone_id').size().tail(20)
Out[62]:
In [4]:
# Base-year buildings straight from the HDF5 store (bypassing dset.d overrides).
dset.store.buildings[['residential_units',]]
Out[4]:
In [2]:
e = establishments.reset_index()
# Explode establishments into one row per job. Vectorized with np.repeat
# instead of the original nested Python append loop, which was O(total jobs)
# list appends. Each establishment row is repeated `employees` times.
reps = e.employees.values
bids = np.repeat(e.building_id.values, reps)
# NOTE(review): the original appended e.index[idx], i.e. the *positional* index
# after reset_index (establishment ids live in the reset column) -- preserved
# here; confirm whether establishment ids were intended instead.
eids = np.repeat(e.index.values, reps)
hbs = np.repeat(e.home_based_status.values, reps)
sids = np.repeat(e.sector_id.values, reps)
print(len(bids))
print(len(eids))
print(len(hbs))
print(len(sids))
jobs = pd.DataFrame({'tempid': np.arange(1, len(bids) + 1), 'building_id': bids,
                     'establishment_id': eids, 'home_based_status': hbs, 'sector_id': sids})
In [3]:
jobs
Out[3]:
In [4]:
# Building attributes needed to geo-locate jobs.
buildings[['residential_units','centroid_x','centroid_y','external_zone_id']]
Out[4]:
In [5]:
np.unique(buildings.external_zone_id)
Out[5]:
In [13]:
# Attach building location attributes to each job via building_id lookup.
jobs['x'] = buildings.centroid_x[jobs.building_id].values
jobs['y'] = buildings.centroid_y[jobs.building_id].values
jobs['taz05_id'] = buildings.external_zone_id[jobs.building_id].values
# Collapse NAICS sector codes into six aggregate job types (1..6; 0 = unmatched).
jobs['sector_id_six'] = (1*(jobs.sector_id==61) + 2*(jobs.sector_id==71)
    + 3*np.in1d(jobs.sector_id,[11,21,22,23,31,32,33,42,48,49])
    + 4*np.in1d(jobs.sector_id,[7221,7222,7224])
    + 5*np.in1d(jobs.sector_id,[44,45,7211,7212,7213,7223])
    + 6*np.in1d(jobs.sector_id,[51,52,53,54,55,56,62,81,92]))
jobs['jobtypename'] = ''
# .loc writes replace the original chained assignments
# (jobs.jobtypename[mask] = ...), which trigger SettingWithCopyWarning and can
# silently write to a temporary copy.
jobs.loc[jobs.sector_id_six==1, 'jobtypename'] = 'Education'
jobs.loc[jobs.sector_id_six==2, 'jobtypename'] = 'Entertainment'
jobs.loc[jobs.sector_id_six==3, 'jobtypename'] = 'Production'
jobs.loc[jobs.sector_id_six==4, 'jobtypename'] = 'Restaurant'
jobs.loc[jobs.sector_id_six==5, 'jobtypename'] = 'Retail'
jobs.loc[jobs.sector_id_six==6, 'jobtypename'] = 'Service'
jobs['urbancenter_id'] = 0
In [5]:
np.unique(dset.zones['external_zone_id'].values)
Out[5]:
In [14]:
jobs.tail()
Out[14]:
In [10]:
# Jobs attached to unplaced establishments (building_id == -1).
jobs[jobs.building_id==-1]
Out[10]:
In [32]:
establishments[establishments.building_id==-1]
Out[32]:
In [2]:
# Household location choice: residential buildings are the alternatives.
# NOTE(review): `sim_year` and `buildings` must be defined earlier in the
# session -- they are not set in the visible cells; confirm before a full re-run.
alternatives = buildings[(buildings.residential_units>0)]
hlcm_simulation.simulate(dset, year=sim_year,
depvar = 'building_id',alternatives=alternatives,simulation_table = 'households',output_names = ("drcog-coeff-hlcm-%s.csv","DRCOG HOUSEHOLD LOCATION CHOICE MODELS (%s)","hh_location_%s","household_building_ids"),
agents_groupby= ['income_3_tenure',],
transition_config = {'Enabled':True,'control_totals_table':'annual_household_control_totals','scaling_factor':1.0},
relocation_config = {'Enabled':True,'relocation_rates_table':'annual_household_relocation_rates','scaling_factor':1.0},
)
In [3]:
# Employment location choice: non-residential buildings are the alternatives.
alternatives = buildings[(buildings.non_residential_sqft>0)]
elcm_simulation.simulate(dset, year=sim_year,
depvar = 'building_id',alternatives=alternatives,simulation_table = 'establishments',output_names = ("drcog-coeff-elcm-%s.csv","DRCOG EMPLOYMENT LOCATION CHOICE MODELS (%s)","emp_location_%s","establishment_building_ids"),
agents_groupby= ['sector_id_retail_agg',],
transition_config = {'Enabled':True,'control_totals_table':'annual_employment_control_totals','scaling_factor':1.0})
In [4]:
# Residential price hedonic, segmented by building type.
regression_model_simulation.simulate(dset, year=sim_year, output_varname='unit_price_residential',
simulation_table='buildings', output_names = ["drcog-coeff-reshedonic-%s.csv","DRCOG RESHEDONIC MODEL (%s)","resprice_%s"],
agents_groupby = 'building_type_id', segment_ids = [2,3,20,24])
In [5]:
# Non-residential price hedonic, segmented by building type.
regression_model_simulation.simulate(dset, year=sim_year,
output_varname='unit_price_non_residential', simulation_table='buildings', output_names = ["drcog-coeff-nrhedonic-%s.csv","DRCOG NRHEDONIC MODEL (%s)","nrprice_%s"],
agents_groupby = 'building_type_id', segment_ids = [5,8,11,16,17,18,21,23,9,22])
In [6]:
# Proforma developer configuration (densities, constraints, cost factors).
developer_configuration = {
'enforce_environmental_constraints':True,
'enforce_allowable_use_constraints':True,
'enforce_ugb':False,
'outside_ugb_allowable_density':1.0,
'uga_policies':False,
'inside_uga_allowable_density':1.0,
'max_allowable_far_field_name':'far',
'land_property_acquisition_cost_factor':1.0,
'profit_factor':1.0,
'min_building_sqft':400,
'min_lot_sqft':500,
'zonal_levers':True
}
from urbandeveloper import proforma_developer_model
# NOTE(review): hh_zone1 / emp_zone1 are not defined in the visible cells --
# presumably zone-level demand inputs produced earlier in the session; confirm.
buildings, newbuildings = proforma_developer_model.run(dset,hh_zone1,emp_zone1,developer_configuration,sim_year)
dset.d['buildings'] = pd.concat([buildings,newbuildings])
In [7]:
# sim_year = 2015
# urbancanvas_scenario_id = 0
# def export_to_urbancanvas(building_df,current_year,urbancanvas_scenario_id):
# import pandas.io.sql as sql
# import psycopg2
# import cStringIO
# conn_string = "host='paris.urbansim.org' dbname='denver' user='drcog' password='<REDACTED -- never commit credentials; load from an environment variable or secrets manager>' port=5433"
# conn=psycopg2.connect(conn_string)
# cur = conn.cursor()
# if urbancanvas_scenario_id == 0:
# query = "select nextval('developmentproject_id_seq');"
# nextval = sql.read_frame(query,conn)
# nextval = nextval.values[0][0]
# query = "select max(id)+1 from scenario_project;"
# id = sql.read_frame(query,conn)
# id = id.values[0][0]
# query = "INSERT INTO scenario(id, name) VALUES(%s, 'Run #%s');" % (nextval,nextval)
# cur.execute(query)
# conn.commit()
# query = "INSERT INTO scenario_project(id, scenario, project) VALUES(%s, %s, 1);" % (id,nextval)
# cur.execute(query)
# conn.commit()
# query = "select max(id)+1 from scenario_project;"
# id = sql.read_frame(query,conn)
# id = id.values[0][0]
# query = "INSERT INTO scenario_project(id, scenario, project) VALUES(%s, %s, %s);" % (id,nextval,nextval)
# cur.execute(query)
# conn.commit()
# else:
# nextval = urbancanvas_scenario_id
# nextval_string = '{' + str(nextval) + '}'
# building_df['projects'] = nextval_string
# valid_from = '{' + str(current_year) + '-1-1}'
# building_df['valid_from'] = valid_from
# building_df['land_area'] = 0
# building_df['tax_exempt'] = 0
# building_df['srcparc_id'] = '0'
# building_df['building_id'] = building_df.index.values
# building_df['stories'] = 30 ###For testing!
# del building_df['unit_price_residential']
# del building_df['unit_price_non_residential']
# del building_df['building_sqft_per_job']
# del building_df['base_year_jobs']
# del building_df['non_residential_units']
# del building_df['all_units']
# print 'Exporting %s buildings to Urbancanvas database for project %s and year %s.' % (building_df.index.size,nextval,current_year)
# output = cStringIO.StringIO()
# building_df.to_csv(output, sep='\t', header=False, index=False)
# output.seek(0)
# cur.copy_from(output, 'building_footprints', columns =tuple(building_df.columns.values.tolist()))
# conn.commit()
# return nextval
# nv = export_to_urbancanvas(newbuildings, sim_year, urbancanvas_scenario_id)
In [8]:
# Export the newly developed buildings for this year to the Urbancanvas database
# (0 = create a new scenario on the Urbancanvas side; returns the scenario id).
urbancanvas_scenario_id = 0
from drcog.variables import urbancanvas_export
nv = urbancanvas_export.export_to_urbancanvas(newbuildings, sim_year, urbancanvas_scenario_id)
In [9]:
nv
Out[9]: