In [1]:
# Having to use Python 2 for astrometry.net compatibility so...
from __future__ import division, print_function, unicode_literals
In [2]:
# Importing all other required libraries.
import os
import ccdproc
import glob
import subprocess
import itertools
import numpy as np
from astropy import units as u
from astropy.io import fits
from astropy.coordinates import SkyCoord
from astropy.wcs import utils as wcsutils
from astropy.table import Table
from astropy.stats import sigma_clip
from astroquery.simbad import Simbad
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import rc, rcParams
In [3]:
%matplotlib inline
In [4]:
# Use LaTeX for matplotlib text rendering and bump the base font size for plots.
rc('text', usetex=True)
rcParams.update({'font.size': 14})
In [5]:
# Absolute path of the top level directory containing raw data
raw_root = "/Volumes/LaCie/Huntsman/HuntsmanEye/raw"
# Dictionary of data paths. Only including r' band data from Rainbow
# Observatory for now (nights 2014-09-20 through 2014-09-26).
raw_nights = ["2014-09-2{}".format(day) for day in range(7)]
paths = {
    'calib': "/Volumes/LaCie/Huntsman/HuntsmanEye/calib",
    'config': os.path.abspath('./configs'),
    'temp': os.path.abspath('./temp'),
    'raw': [os.path.join(raw_root, night) for night in raw_nights],
    'red': os.path.abspath('./reduced'),
}
# Filenames of the master calibration frames (relative to paths['calib']).
bias_file = 'master_bias.fits'
dark_file = 'master_dark.fits'
unbiased_dark_file = 'master_unbiased_dark.fits'
flat_file = 'master_flat.fits'
In [6]:
# Create temp and reduced directories if they don't exist.
# Bug fix: the original iterated over (paths['red'], paths['red']) — the
# same path twice — so the temp directory was never created.
for path in (paths['temp'], paths['red']):
    try:
        # Try to create directory, will raise an exception if it already exists
        os.makedirs(path)
    except OSError:
        # Directory already exists, nothing to do
        pass
In [7]:
# Load the master calibration frames from the calibration directory.
calib_dir = paths['calib']
master_bias = ccdproc.CCDData.read(os.path.join(calib_dir, bias_file))
master_dark = ccdproc.CCDData.read(os.path.join(calib_dir, dark_file))
if unbiased_dark_file:
    # A pre-made bias-subtracted master dark exists; read it directly.
    master_unbiased_dark = ccdproc.CCDData.read(os.path.join(calib_dir, unbiased_dark_file))
else:
    # Otherwise derive one by subtracting the master bias from the master dark.
    master_unbiased_dark = ccdproc.subtract_bias(master_dark, master_bias)
master_flat = ccdproc.CCDData.read(os.path.join(calib_dir, flat_file))
In [8]:
# Sequence numbers of 'light' exposures with bad data (bad tracking,
# severe cloud, etc.), one list per night in paths['raw'] order.
# This is just for 'light' exposures; if we want to re-reduce the flats
# we'll need the bad flat list too.
bad_seq_nums = [
    [66, 70, 75, 77, 78, 79, 89],
    list(range(9, 23)),            # 9-22 inclusive
    [50, 51, 52],
    [80, 81, 85, 90],
    [],
    [80, 81, 82, 87, 88, 96, 98, 99],
    [115, 128, 130, 131, 153],
]
In [9]:
# Just NGC300 data for now, most of the 300s exposures were on this target.
ngc300_r_names = []
for i, raw_path in enumerate(paths['raw']):
    lights = glob.glob(os.path.join(raw_path, "*_light.fits"))
    for light in lights:
        light_header = fits.getheader(light)
        # Only want exposures targeting NGC 300 (duh), with 300 second
        # exposure time and sensor temperature within 0.5 of -20.
        # Bug fix: the original condition was `A or B and C and D`, which
        # parses as `A or (B and C and D)`, so 'ngc_300' frames passed
        # regardless of exposure time or temperature. The target test is
        # now properly grouped via a membership check.
        if (light_header['TARGET'] in ('ngc_300', 'ngc300')
                and light_header['EXPTIME'] == 300
                and abs(light_header['TEMPERAT'] + 20) < 0.5):
            # Also want to exclude known bad exposures (serious cloud, bad tracking, etc.)
            seq_num = int((os.path.basename(light)).split('_')[1])
            if seq_num not in bad_seq_nums[i]:
                ngc300_r_names.append(light)
In [10]:
# Show how many exposures survived the target/exposure-time/bad-frame cuts.
len(ngc300_r_names)
Out[10]:
In [11]:
# Copy the selected exposures into the temp directory, prefixing each
# filename with its night directory, and trim off the first 30 rows.
os.chdir(paths['temp'])
for raw_name in ngc300_r_names:
    night = os.path.split(os.path.dirname(raw_name))[1]
    out_name = night + "_" + os.path.basename(raw_name)
    ccd = ccdproc.CCDData.read(raw_name, unit='adu')
    ccd = ccdproc.trim_image(ccd[30:, :])
    ccdproc.CCDData.write(ccd, out_name, format='fits', clobber=True)
    print(out_name)
In [ ]:
# Build a weight map and flag map for each exposure with WeightWatcher
# (ww), using the master flat and the raw image as weight inputs.
os.chdir(paths['temp'])
ww_template = "ww -c {} -WEIGHT_NAMES {} -OUTWEIGHT_NAME {} -OUTFLAG_NAME {}"
for light_name in glob.glob('*_light.fits'):
    config_name = os.path.join(paths['config'], 'NGC300.ww')
    weight_names = os.path.join(paths['calib'], flat_file) + ',' + light_name
    out_weight = os.path.basename(light_name).replace('.fits', '.bdfw.weight.fits')
    out_flag = os.path.basename(light_name).replace('.fits', '.bdfw.flag.fits')
    subprocess.call(ww_template.format(config_name, weight_names, out_weight, out_flag),
                    shell=True)
    print(light_name)
In [ ]:
os.chdir(paths['temp'])
# Bias, dark and flat correct each trimmed exposure; '.bdf' suffix marks
# bias/dark/flat corrected output.
for raw in glob.glob('*_light.fits'):
    image = ccdproc.CCDData.read(raw)
    image = ccdproc.subtract_bias(image, master_bias)
    # Dark is scaled by the ratio of exposure times; the master dark is
    # assumed to be a 300 second exposure — TODO confirm against the
    # master dark header rather than hard-coding.
    image = ccdproc.subtract_dark(image, master_unbiased_dark, scale=True, \
        data_exposure = image.header['EXPTIME'] * u.second, dark_exposure=300 * u.second)
    image = ccdproc.flat_correct(image, master_flat)
    on = os.path.basename(raw).replace('.fits', '.bdf.fits')
    print(on)
    ccdproc.CCDData.write(image, on, format='fits', clobber=True)
In [ ]:
# Astrometrically calibrate each corrected image with astrometry.net's
# solve-field, writing the WCS-equipped result as '.bdfw.fits'.
os.chdir(paths['temp'])
for light_name in glob.glob('*_light.fits'):
    corrected = os.path.basename(light_name).replace('.fits', '.bdf.fits')
    solved = corrected.replace('.bdf.fits', '.bdfw.fits')
    solve_cmd = ('solve-field --no-plots --ra 14.9 --dec -37.7 '
                 '--radius 1.5 --new-fits {} {}').format(solved, corrected)
    subprocess.call(solve_cmd, shell=True)
    print(solved)
In [ ]:
# Run SExtractor on each calibrated image to produce an object catalogue,
# using the weight and flag maps made earlier.
os.chdir(paths['temp'])
sex_template = 'sex -c {} -CATALOG_NAME {} -PARAMETERS_NAME {} -WEIGHT_IMAGE {} -FLAG_IMAGE {} {}'
for light_name in glob.glob('*_light.fits'):
    config_name = os.path.join(paths['config'], 'NGC300.sex')
    param_name = os.path.join(paths['config'], 'NGC300.param')
    image_name = os.path.basename(light_name).replace('.fits', '.bdfw.fits')
    weight_name = image_name.replace('.fits', '.weight.fits')
    flag_name = image_name.replace('.fits', '.flag.fits')
    cat_name = image_name.replace('.fits', '.cat')
    subprocess.call(sex_template.format(config_name, cat_name, param_name,
                                        weight_name, flag_name, image_name), shell=True)
    print(cat_name)
In [ ]:
# Run SCAMP over all the SExtractor catalogues to refine the astrometric
# solution.
os.chdir(paths['temp'])
# Write the list of input catalogue names, one per line.
cat_list_name = 'cat_list.txt'
with open(cat_list_name, 'w') as cat_list:
    for light_name in glob.glob('*_light.fits'):
        cat_list.write(os.path.basename(light_name).replace('.fits', '.bdfw.cat') + '\n')
# Using a config file with a small field group separation so that each
# of the 9 dither positions gets labelled as a separate field group
config_name = os.path.join(paths['config'], 'NGC300.scamp')
subprocess.call('scamp @{} -c {} '.format(cat_list_name, config_name), shell=True)
In [ ]:
# Build the SWarp input file lists: one with every calibrated image, plus
# one per SCAMP field group (i.e. per dither position).
os.chdir(paths['temp'])
inputfiles = [os.path.abspath(os.path.basename(raw).replace('.fits', '.bdfw.fits'))
              for raw in glob.glob('*_light.fits')]
iln = 'inputlist.txt'
with open(iln, 'w') as list_file:
    for input_name in inputfiles:
        list_file.write(input_name + '\n')
# Bucket the (to-be-resampled) filenames by the FGROUPNO that SCAMP
# recorded in each image's .head file.
groupedfiles = {group: [] for group in range(1, 10)}
for input_name in inputfiles:
    scamp_head = fits.Header.fromtextfile(input_name.replace('.fits', '.head'))
    try:
        groupedfiles[scamp_head['FGROUPNO']].append(input_name.replace('.fits', '.resamp.fits'))
    except KeyError:
        # No (or unexpected) FGROUPNO for this image; leave it ungrouped.
        pass
for group, group_names in groupedfiles.items():
    iln = 'inputlist{}.txt'.format(group)
    with open(iln, 'w') as list_file:
        for input_name in group_names:
            list_file.write(input_name + '\n')
In [ ]:
# SWarp all calibrated images into a single median-combined stack,
# keeping the individual resampled images for the per-group stacks.
os.chdir(paths['temp'])
cn = os.path.join(paths['config'], 'NGC300.swarp')
outn = os.path.join(paths['red'], 'NGC300_all.fits')
woutn = os.path.join(paths['red'], 'NGC300_all.weight.fits')
swarp_cmd = 'swarp @{} -c {} -DELETE_TMPFILES N -IMAGEOUT_NAME {} -WEIGHTOUT_NAME {}'.format(
    'inputlist.txt', cn, outn, woutn)
subprocess.call(swarp_cmd, shell=True)
In [ ]:
# Copy header from all file stack to .head files for each dither position so
# everything ends up on the same pixel grid
header = fits.Header.fromfile(os.path.join(paths['red'], 'NGC300_all.fits'))
for group in groupedfiles.keys():
    header.totextfile(os.path.join(paths['red'], 'NGC300_{}.head'.format(group)), endcard=True, clobber=True)
# Do individual dither positions. RESAMPLE N reuses the resampled images
# produced by the previous all-image SWarp run.
for group in groupedfiles.keys():
    iln = 'inputlist{}.txt'.format(group)
    outn = os.path.join(paths['red'], 'NGC300_{}.fits'.format(group))
    woutn = os.path.join(paths['red'], 'NGC300_{}.weight.fits'.format(group))
    # NOTE(review): 'cn' (the SWarp config path) leaks in from the previous
    # cell — this cell fails if run on its own after a kernel restart.
    command = 'swarp @{} -c {} -RESAMPLE N -IMAGEOUT_NAME {} -WEIGHTOUT_NAME {}'.format(iln, cn, outn, woutn)
    subprocess.call(command, shell=True)
In [ ]:
# Difference imaging: subtract the per-dither-position reference stack
# from each individual resampled exposure.
os.chdir(os.path.join(paths['red'], 'difference'))
for group in groupedfiles.keys():
    # Load reference image for this dither position
    reference = ccdproc.CCDData.read(os.path.join(paths['red'], 'NGC300_{}.fits'.format(group)), unit='adu')
    for rfile in groupedfiles[group]:
        # Load resampled image
        resamp = ccdproc.CCDData.read(rfile, unit='adu')
        resamp.wcs.printwcs()
        # Apply SCAMP flux scaling, with ugly hack to preserve WCS
        wcs = resamp.wcs
        resamp = ccdproc.CCDData(resamp.multiply(resamp.header['FLXSCALE'] * u.dimensionless_unscaled), wcs=wcs)
        # Work out section of reference image to use.
        # NOTE(review): CRPIX values are FITS floats and these offsets are
        # used as slice indices below — confirm they are integral here (or
        # cast with int()) before relying on this.
        x0 = reference.header['CRPIX1'] - resamp.header['CRPIX1']
        x1 = x0 + resamp.header['NAXIS1']
        y0 = reference.header['CRPIX2'] - resamp.header['CRPIX2']
        y1 = y0 + resamp.header['NAXIS2']
        # Trim reference image
        ref = ccdproc.trim_image(reference[y0:y1,x0:x1])
        # Subtract reference. Ugly hacks needed here too because ccdproc.trim_image doesn't update WCS
        # NOTE(review): 'ref' is a CCDData, not an ndarray — this relies on
        # numpy coercing it during in-place subtraction; 'ref.data' would be
        # more explicit. TODO confirm this behaves as intended.
        resamp.data -= ref
        # Write result
        resamp.write(os.path.basename(rfile).replace('.fits', '.diff.fits'), clobber=True)
In [ ]: