In [1]:
# make the ztf_sim package (one directory up) importable without installing it
import sys
sys.path.append('..')
In [2]:
import ztf_sim
from astropy.time import Time
import pandas as pd
import numpy as np
import astropy.units as u
import pylab as plt
import seaborn as sns
%matplotlib inline
In [3]:
# plotting defaults: tick-style axes and the larger 'talk' context
# so figures are legible in presentations
sns.set_style('ticks')
sns.set_context('talk')
In [12]:
# load the per-block exposure counts ('weather_blocks' table) from the
# simulation's sqlite database via the project helper
df = ztf_sim.utils.df_read_from_sqlite('weather_blocks')
In [5]:
# quick look at the table structure (columns presumably include
# 'block', 'nexps', 'year' given their use below)
df.head()
Out[5]:
In [6]:
# distribution of exposures per block, using the explicit axes interface
fig, ax = plt.subplots()
ax.hist(df['nexps'])
ax.set_xlabel('Number of exposures in block')
ax.set_ylabel('Number of blocks')
sns.despine()
We see a peak near 12 images per block, which at ~100 seconds average time per exposure matches our 20 minute block size:
In [7]:
# sanity check: 12 exposures x 100 s each, converted to minutes (should be ~20)
12*100*u.second.to(u.min)
Out[7]:
In [13]:
# Number of scheduler blocks in a 24 h day; TIME_BLOCK_SIZE is an astropy
# Quantity, so divide in minutes and round to an integer count.
# NOTE: np.int was removed from numpy (1.24+); use the builtin int instead.
blocks_per_day = np.round((1.*u.day).to(u.min)/ztf_sim.constants.TIME_BLOCK_SIZE).astype(int)
fig = plt.figure(figsize=(18,24))
# cut off 2017, which is partial
df = df[df['year'] < 2017]
years = np.sort(list(set(df['year'])))
nyears = len(years)
for i, year in enumerate(years):
    # one heatmap row (subplot) per year
    ax = plt.subplot(nyears, 1, i + 1)
    w = df['year'] == year
    # array to hold counts for every (day, block) slot, zeros where no data
    # buggy but sufficient accounting for leap years (wrong for e.g. 1900/2100,
    # fine for this date range)
    ndays = 366 if year % 4 == 0 else 365
    nexps = np.zeros([ndays, blocks_per_day])
    # 'block' appears to be a flat index into the (day, block) grid for the
    # year -- TODO confirm against how weather_blocks was written
    for block, n in zip(df[w]['block'], df[w]['nexps']):
        nexps.flat[block] = n
    sns.heatmap(nexps.T, xticklabels=15, yticklabels=False, vmin=0, vmax=12)
    ax.set_ylim([25, 70])  # this was guess and check...
    ax.set_ylabel(int(year))
    ax.set_xlabel('Day of Year')
plt.savefig('fig/PTF_exposure_blocks.png', bbox_inches='tight')
Let's look at the raw image times (all filters, including H-alpha) to understand the available observing time.
In [61]:
# Raw exposure start times (MJD), one per line; skipfooter requires the
# python parser engine (the C engine does not support it).
df = pd.read_table('../data/mjd.txt.gz', sep='|',
                   names=['expMJD'],
                   skipfooter=1, engine='python')
# let's just look at complete iPTF years
df = df[(df['expMJD'] >= Time('2013-01-01').mjd) & (df['expMJD'] < Time('2016-01-01').mjd)]
t = Time(df['expMJD'], format='mjd', location=ztf_sim.utils.P48_loc)
df['datetime'] = t.datetime
df = df.set_index('datetime')
df['month'] = df.index.month
# label each exposure with an integer 'night' (floor of the MJD);
# np.int was removed from numpy (1.24+), use the builtin int
df['night'] = np.floor(t.mjd).astype(int)
Now count exposures by night
In [65]:
# count exposures per night and attach the count back onto every exposure row
grp = df.groupby(['night'])
nexps = grp['expMJD'].agg(len).rename('nexps')
# per-night dataframe via an identity aggregation
dfm = grp.agg(lambda x: x)
df = df.join(nexps, on='night')
Convert to observing time in hours using our average time between exposures. This means our observing time estimate is conservative, because it excludes long slews.
In [63]:
# convert per-night exposure counts to hours of observing time,
# assuming 100 s average per exposure (conservative: excludes long slews)
seconds_per_exposure = 100.
df['obstime'] = df['nexps'] * seconds_per_exposure / 3600.
In [64]:
# inspect the frame now that 'nexps' and 'obstime' have been added
df.head()
Out[64]:
Aggregate by month:
In [ ]:
# regroup by calendar month for the monthly aggregation
# (rebinds grp, which previously held the per-night grouping)
grp = df.groupby('month')
In [53]:
# now fill missing dates with zeros--nights with no exposures due to weather or
# other downtime
# (df.asfreq('D') doesn't let me set the fill value)
# fix: pandas is imported as pd (cell 2); bare 'pandas' raised NameError
df = df.reindex(pd.date_range(df.index.min(), df.index.max()), fill_value=0)
Out[53]:
In [42]:
# sanity check: the span of complete iPTF years (1095 daily entries)
pd.date_range('2013-01-01','2015-12-31')
Out[42]:
In [ ]: