In [1]:
%matplotlib inline
import matplotlib.pyplot as plt
In [2]:
import numpy as np
In [3]:
import pandas as pd
In [4]:
from sqlalchemy import create_engine
In [5]:
# Uncomment these lines and change directories to write to hdf instead
# dbname = '/Users/rbiswas/data/LSST/OpSimData/kraken_1042_sqlite.db'#enigma_1189_sqlite.db'
# engine = create_engine('sqlite:///' + dbname)
# Summary = pd.read_sql_table('Summary', engine, index_col='obsHistID')
# Summary.to_hdf('/Users/rbiswas/data/LSST/OpSimData/kraken_1042.hdf', 'table')
In [6]:
# Load the pre-cached OpSim summary table (written once by the commented-out
# cell above).
# NOTE(review): hardcoded absolute local path — consider a configurable DATA_DIR.
df = pd.read_hdf('/Users/rbiswas/data/LSST/OpSimData/kraken_1042.hdf', 'table')
In [7]:
# Restrict to the field/proposal of interest: fieldID 1427, propID 152.
df = df[(df['fieldID'] == 1427) & (df['propID'] == 152)]
In [8]:
# Earliest visit MJD and the temporal midpoint of the selected observations.
mjd_min = df.expMJD.min()
mjd_max = df.expMJD.max()
print(mjd_min)
print((mjd_max + mjd_min) / 2.)
In [9]:
# Sanity check: after the query above, the propID histogram should show a
# single value (152).
df.propID.hist()
Out[9]:
In [10]:
import OpSimSummary.summarize_opsim as so
In [11]:
# Wrap the selected visits in a SummaryOpsim object for cadence plotting.
ds = so.SummaryOpsim(summarydf=df)
In [12]:
# Temporal midpoint of the survey for this field (used as mjd_center below).
(df.expMJD.min() + df.expMJD.max()) / 2.
Out[12]:
If we ignore dithers and just look at the number of observations per field:
In [13]:
# Full-survey cadence plot for this field.  mjd_center=61404 is (roughly) the
# temporal midpoint computed above; +/- 1825 days covers ~5 years each side.
full_survey = ds.cadence_plot(fieldID=1427, mjd_center=61404, mjd_range=[-1825, 1825],
observedOnly=False, colorbar=True);
plt.close()
In [14]:
# Display the first element of cadence_plot's return tuple — presumably the
# figure (same pattern as secondYearObs[0] below); closed above, shown here.
full_survey[0]
Out[14]:
In [15]:
# First half of the survey: same center, but range ends at mjd_center + 1 day.
half_survey = ds.cadence_plot(fieldID=1427, mjd_center=61404, mjd_range=[-1825, 1],
observedOnly=False, colorbar=True);
In [16]:
# A ~300-day window centered on MJD 60200 — labelled "second year";
# NOTE(review): confirm this MJD actually falls in year 2 of the survey.
second_year = ds.cadence_plot(fieldID=1427, mjd_center=60200, mjd_range=[-150, 150],
observedOnly=False, colorbar=True);
In [17]:
# A single 30-day window (note: [-0, 30] is just [0, 30]); suppress the inline
# figure and redisplay it explicitly as the cell output.
secondYearObs = ds.cadence_plot(fieldID=1427, mjd_center=60300, mjd_range=[-0, 30], observedOnly=False)
plt.close()
secondYearObs[0]
Out[17]:
In [18]:
# Preserve the index as an ordinary column before grouping — presumably the
# obsHistID (see the commented-out read_sql_table cell with index_col above).
df['obsID'] = df.index.values
In [19]:
# Group visits by (night, filter): each group holds the same-night,
# same-band exposures, from which we will keep one visit each.
uniqueObs = df.groupby(['night', 'filter'])
In [20]:
# For each (night, filter) group keep the index label of the minimum-airmass
# visit.  The dict-of-renamers form of .agg({'myInds': lambda x: x.idxmin()})
# was deprecated in pandas 0.21 and removed in 1.0; SeriesGroupBy.idxmin()
# computes the same thing directly.
aa = uniqueObs['airmass'].idxmin().astype(int).values
In [21]:
# Select the one-visit-per-(night, filter) rows by index label.
# .ix was deprecated in pandas 0.20 and removed in 1.0 — use label-based .loc.
ourOpSim = df.loc[aa]
In [22]:
# Compare airmass distributions per filter: full sample (black) vs. the
# one-visit-per-(night, filter) selection (red) on shared axes.
# `normed` was removed from matplotlib's hist (3.1) — use density=True;
# `.ix` was removed from pandas — use label-based .loc.
axs = df.hist(by='filter', column='airmass', histtype='step', lw=2, alpha=1, color='k', density=True);
axs = df.loc[aa].hist(by='filter', column='airmass', histtype='step', lw=2, alpha=1, color='r', ax=axs, density=True)
In [23]:
# Quick duplicate check: number of distinct obsIDs vs. total row count.
df.obsID.unique().size, df.obsID.size
Out[23]:
In [24]:
# Peek at the culled visit table.
ourOpSim.head()
Out[24]:
Our culled OpSim that we shall try out first is now 'ourOpSim'. We can write this out to a csv file, or a database. We can also view the list of obsHistIDs.
In [25]:
# The obsHistIDs of the selected visits.
ourOpSim.obsID.values
Out[25]:
In [34]:
# Persist just the selected obsHistIDs.
ourOpSim.obsID.to_csv('FirstSet_obsHistIDs.csv')
In [35]:
# Persist the full culled visit table.
ourOpSim.to_csv('SelectedKrakenVisits.csv')
In [26]:
# Count how many selected visits fall in each (night, filter) group.
xx = ourOpSim.groupby(['night', 'filter']).count()
In [33]:
# Invariant: every (night, filter) group contains exactly one selected visit.
assert (xx.max() == 1).all()
In [ ]:
# Same selection, rewritten without the dict-of-renamers .agg form, which was
# deprecated in pandas 0.21 and removed in 1.0: idxmin() gives the label of the
# minimum-airmass visit per group; keep the 'myInds' column name for dff.
dff = uniqueObs['airmass'].idxmin().to_frame('myInds')
aa = dff.myInds.astype(int).values
aa.sort()
In [ ]:
# Equivalent per-group loop (cross-check of the groupby idxmin selection).
# Bug fix: `keys` was undefined (NameError on a fresh kernel) — iterate the
# group keys exposed by the GroupBy object instead.
l = []
for key in uniqueObs.groups:
    l.append(uniqueObs.get_group(key).airmass.idxmin())
In [ ]: