In [2]:
%matplotlib
import matplotlib.pyplot as plt
import numpy as np
In [5]:
from StellarSpectra.model import Model
from StellarSpectra.spectrum import DataSpectrum
from StellarSpectra.grid_tools import TRES, HDF5Interface
import scipy.sparse as sp
import numpy as np
# Load a single TRES order (22) of the WASP-14 spectrum and wire it into a
# Model backed by the PHOENIX sub-master grid.
myDataSpectrum = DataSpectrum.open("../tests/WASP14/WASP-14_2009-06-15_04h13m57s_cb.spec.flux", orders=np.array([22]))
myInstrument = TRES()
myHDF5Interface = HDF5Interface("../libraries/PHOENIX_submaster.hdf5")
# Parameter tuples name the sampling dimensions: stellar, Chebyshev continuum,
# global covariance, and local covariance-region parameters.
myModel = Model(myDataSpectrum, myInstrument, myHDF5Interface, stellar_tuple=("temp", "logg", "Z", "vsini", "vz", "logOmega"),
cheb_tuple=("c1", "c2", "c3"), cov_tuple=("sigAmp", "logAmp", "l"), region_tuple=("h", "loga", "mu", "sigma"))
# Only one order was requested, so its OrderModel is the first (and only) entry.
myOrderModel = myModel.OrderModels[0]
In [66]:
#!! Only evaluate if you need new wl and fl
# Pull the truncated wavelength grid and one raw PHOENIX flux vector from the
# HDF5 library, then cache both to .npy so later cells can reload them without
# the HDF5 interface.
ind = myHDF5Interface.ind
wl = myHDF5Interface.wl[ind[0]: ind[1]]
fl = myHDF5Interface.load_flux({"temp":6000, "logg":4.0, "Z":-0.5, "alpha":0})
np.save("wl.npy", wl)
np.save("fl.npy", fl)
The results:
Tests on a fake dataset created with vsini=5.0 km/s still return the correct answer, both assuming no correlated noise and assuming correlated noise.
Tests on WASP-14 fluxes assuming correlated noise give ~10.6 km/s
Generating fake spectra with large vsini of 50 km/s and 100 km/s (so that the TRES kernel should be irrelevant) yields datasets that have FWHM of 50 km/s and 100 km/s respectively. Taking a look at Gray, it seems as though it shouldn't be a 1-to-1 mapping of vsini to FWHM, so this makes me a little bit skeptical.
If I fit WASP14 assuming uncorrelated noise and it still results in posteriors that favor 10.6 km/s, then we know that we have a problem with either the TRES or vsini kernel. THIS IS THE CASE, DAMMIT
The logical conclusion from this exercise is that either WASP-14 actually has a $v \sin i$ of 10 km/s, or the TRES/vsini kernels are wrong. What we do know is that the increased $v \sin i$ determination is not due to the inclusion of correlated noise. Since the published values for WASP-14 vsini are ~5 km/s, AND a previous version of the code also got vsini = 5, I have to assume that a bug was introduced in my new version of the code.
Could this be due to using the REAL FFT and not the complex case?
Is there an easy way to independently determine the vsini of our synthetic spectrum? I don't think so. We can start by taking a raw spectrum delivered from the interpolator and trying to create a vsini-broadened spectrum with both methods.
In [6]:
# Reload the raw model wavelengths and fluxes cached by the HDF5 cell above.
wl = np.load("wl.npy")
fl = np.load("fl.npy")
In [4]:
# Sanity-check the raw (unbroadened) model spectrum.
plt.plot(wl, fl)
plt.show()
In [40]:
#set vsini
vsini = 5.0 #km/s -- projected rotational velocity used throughout the broadening tests
In [7]:
from scipy.ndimage.filters import convolve
c_ang = 2.99792458e18 #A s^-1 -- speed of light in Angstroms per second
c_kms = 2.99792458e5 #km s^-1 -- speed of light in kilometers per second
In [6]:
def karray(center, width, res):
'''Creates a kernel array with an odd number of elements, the central element centered at `center` and spanning
out to +/- width in steps of resolution. Works similar to arange in that it may or may not get all the way to the
edge.'''
neg = np.arange(center - res, center - width, -res)[::-1]
pos = np.arange(center, center + width, res)
kar = np.concatenate([neg, pos])
return kar
@np.vectorize
def vsini_ang(lam0, vsini, dlam=0.01, epsilon=0.6):
    '''vsini in km/s. Epsilon is the limb-darkening coefficient, typically 0.6. Formulation uses Eqn 18.14 from Gray,
    The Observation and Analysis of Stellar Photospheres, 3rd Edition.

    lam0 is the central wavelength in AA; dlam is the kernel sample spacing
    in AA.  Returns the rotational-broadening kernel normalized to unit sum.'''
    # Maximum Doppler displacement at the stellar limb, in AA
    # (vsini [km/s] * 1e13 -> AA/s, divided by c in AA/s, scaled by lam0).
    lamL = vsini * 1e13 * lam0 / c_ang
    lam = karray(0, lamL, dlam)
    # Gray Eqn 18.14: elliptical term (c1) plus limb-darkening parabolic term (c2).
    c1 = 2. * (1 - epsilon) / (np.pi * lamL * (1 - epsilon / 3.))
    c2 = epsilon / (2. * lamL * (1 - epsilon / 3.))
    series = c1 * np.sqrt(1. - (lam / lamL) ** 2) + c2 * (1. - (lam / lamL) ** 2) ** 2
    # Normalize so convolution with this kernel preserves total flux.
    return series / np.sum(series)
In [8]:
#TRES Instrument Broadening
@np.vectorize
def gauss_kernel(dlam, lam0, V=6.8):
    '''Unnormalized Gaussian instrumental profile.

    dlam -- offset from the central wavelength, in AA.
    lam0 -- central wavelength, in AA.
    V    -- instrumental FWHM in km/s (default is the TRES resolution).
    '''
    # FWHM -> sigma (2.355 ~= 2*sqrt(2 ln 2)), converted from km/s to AA/s so
    # it matches the units of the Doppler term below.
    sig = V / 2.355 * 1e13
    shift = c_ang * dlam / lam0
    return np.exp(-shift ** 2 / (2. * sig ** 2))
def gauss_series(dlam, lam0, V=6.8):
    '''Normalized Gaussian instrumental kernel sampled every `dlam` AA about
    zero offset.  V is the FWHM in km/s.
    (NOTE(review): the original docstring claimed "+/- 3 sigma", but the code
    below samples out to +/- 6 sigma.)'''
    sigma_l = V / (2.355 * c_kms) * lam0 # sigma in AA (lambda)
    wl = karray(0., 6 * sigma_l, dlam) # Gaussian kernel stretching +/- 6 sigma in lambda (AA)
    gk = gauss_kernel(wl, lam0, V)
    # Normalize to unit sum so the convolution preserves flux.
    return gk / np.sum(gk)
In [12]:
#convolve with stellar broadening (sb)
# Wavelength-space reference implementation: rotational kernel first, then the
# TRES instrumental Gaussian.  Both kernels are evaluated at the order center.
# NOTE(review): dlam assumes a uniformly spaced wl grid -- confirm; the raw
# PHOENIX grid is generally not uniform.
k = vsini_ang(np.mean(wl), vsini) # stellar rotation kernel centered at order
f_sb = convolve(fl, k)
dlam = wl[1] - wl[0] # spacing of model points for TRES resolution kernel
#convolve with filter to resolution of TRES
filt = gauss_series(dlam, lam0=np.mean(wl))
f_TRES = convolve(f_sb, filt)
In [13]:
# Quick look at the model after rotational + instrumental convolution.
plt.plot(wl, f_TRES)
Out[13]:
In [25]:
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.special import j1
import pyfftw
import StellarSpectra.constants as C
from StellarSpectra.spectrum import create_log_lam_grid, calculate_min_v
# NOTE(review): re-defines vsini (already set to 5.0 above); keeps the FFT
# cells self-contained if they are run out of order.
vsini = 5.
In [51]:
#Calculate a log_lam grid based upon the length of the DataSpectrum
wl_min, wl_max = np.min(wl), np.max(wl)
# Trim 4 AA from each edge so the FFT grid lies strictly inside the data range.
wl_dict = create_log_lam_grid(wl_min + 4., wl_max - 4., min_vc=0.08/C.c_kms)
wl_FFT = wl_dict["wl"]
print("Grid stretches from {} to {}".format(wl_min, wl_max))
print("wl_FFT is length {} and spaced by {} km/s".format(len(wl_FFT), calculate_min_v(wl_dict)))
min_v = calculate_min_v(wl_dict)
# Frequencies (cycles per km/s) for a real FFT of a grid uniformly spaced by min_v.
ss = np.fft.rfftfreq(len(wl_FFT), d=min_v)
print("ss has length {} and is {}".format(len(ss), ss))
#Grab chunk from wl_FFT
chunk = len(wl_FFT)
# BUG FIX: the original asserted `chunk % 2 == 0`, which only checks evenness,
# while the message claims a power-of-2 check; test for a true power of two.
assert chunk > 0 and (chunk & (chunk - 1)) == 0, "Chunk is not a power of 2. FFT will be too slow."
#How to do this with pyfftw
#influx = pyfftw.n_byte_align_empty(chunk, 16, 'float64')
#FF = pyfftw.n_byte_align_empty(chunk // 2 + 1, 16, 'complex128')
#outflux = pyfftw.n_byte_align_empty(chunk, 16, 'float64')
#fft_object = pyfftw.FFTW(influx, FF, flags=('FFTW_ESTIMATE', 'FFTW_DESTROY_INPUT'))
#ifft_object = pyfftw.FFTW(FF, outflux, flags=('FFTW_ESTIMATE', 'FFTW_DESTROY_INPUT'), direction='FFTW_BACKWARD')
In [54]:
#Interpolate the raw flux to wl_FFT
interp = InterpolatedUnivariateSpline(wl, fl, k=5)
fl_FFT = interp(wl_FFT)
#influx[:] = fl_FFT
#fft_object()
FF = np.fft.rfft(fl_FFT)
# CONSISTENCY FIX: use 2.355 (~= 2*sqrt(2 ln 2)) for the FWHM -> sigma
# conversion, matching gauss_series/gauss_kernel above (this cell used 2.35,
# a small inconsistency between the two broadening paths).
sigma = myInstrument.FWHM / 2.355 # in km/s
#Instrumentally broaden the spectrum by multiplying with a Gaussian in Fourier space
taper = np.exp(-2 * (np.pi ** 2) * (sigma ** 2) * (ss ** 2))
ss[0] = 0.01 #junk so we don't get a divide by zero error
#Determine the stellar broadening kernel (Fourier transform of the rotation profile)
ub = 2. * np.pi * vsini * ss
sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
ss[0] = 0.00 #now set back
#set zeroth frequency to 1 separately (DC term)
sb[0] = 1.
taper[0] = 1.
#FF_copy = FF.copy()
#institute velocity and instrumental taper
#FF *= (sb * taper)
FF_tap = FF * sb * taper
#do ifft
#ifft_object()
# Back to wavelength space; output is sampled on wl_FFT.
output = np.fft.irfft(FF_tap)
In [92]:
# Four stacked panels sharing the frequency axis: raw FT, broadening kernels,
# tapered FT, and the sinc response of the detector pixels.
fig, ax = plt.subplots(nrows=4, sharex=True, figsize=(4, 8))
# NOTE(review): FF and FF_tap are complex; matplotlib plots only the real
# component (with a ComplexWarning) -- confirm that is the intent.
ax[0].plot(ss, FF, label="raw spectrum")
ax[0].set_ylim(-2e10, 2e10)
ax[0].set_ylabel("amplitude")
ax[0].legend()
ax[1].plot(ss, taper, label="PSF")
ax[1].plot(ss, sb, label=r"$v \sin i$")
ax[1].plot(ss, taper * sb, "k", label="both")
ax[1].legend()
ax[2].plot(ss, FF_tap, label="tapered spectrum")
ax[2].set_ylim(-2e10, 2e10)
ax[2].legend()
# 2.34 and 2.77 are presumably the smallest/largest pixel widths in km/s
# (a sinc is the FT of a top-hat pixel) -- TODO confirm against TRES specs.
ax[3].plot(ss, 2.34 * np.sinc(2.34 * ss), label="smallest pixel")
ax[3].plot(ss, 2.77 * np.sinc(2.77 * ss), label="largest pixel")
ax[3].legend(loc="lower left")
ax[-1].set_xlim(0, 0.4)
ax[-1].set_xlabel("cycles [s/km]")
fig.subplots_adjust(hspace=0.18, left=0.17, right=0.94, top=0.97)
fig.savefig("../plots/pixel_response.png")
fig.savefig("../plots/pixel_response.eps")
#plt.show()
In [ ]:
#Show that the pixel sizes do not affect the FFT
def plot_pixel_effect():
    '''Plot the tapered FFT against the sinc responses of the smallest and
    largest pixels, in linear (left column) and dB (right column) panels, to
    show that the pixel sizes do not materially affect the FFT.

    NOTE(review): depends on a module-level `f_full` array and a `gauss_taper`
    helper that are not defined in the cells shown here -- confirm they exist
    before calling.'''
    fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(8, 8))
    #fig = plt.figure()
    #ax = fig.add_subplot(111)
    # BUG FIX: fft, ifftshift, fftfreq and fftshift were referenced unqualified,
    # but only `numpy as np` is imported in this notebook; qualify with np.fft.
    out = np.fft.fft(np.fft.ifftshift(f_full))
    freqs = np.fft.fftfreq(len(f_full), d=0.01) # spacing, Ang
    sfreqs = np.fft.fftshift(freqs)
    taper = gauss_taper(freqs, sigma=0.0496) #Ang, corresponds to 2.89 km/s at 5150A.
    tout = out * taper
    # Left column: normalized linear amplitude.
    for ax in axs[:, 0]:
        ax.plot(sfreqs, np.fft.fftshift(tout) / tout[0])
        ax.plot(sfreqs, np.fft.fftshift(taper))
        ax.plot(sfreqs, 0.0395 * np.sinc(0.0395 * sfreqs))
        ax.plot(sfreqs, 0.0472 * np.sinc(0.0472 * sfreqs))
    # Right column: the same curves in decibels to expose low-amplitude tails.
    for ax in axs[:, 1]:
        ax.plot(sfreqs, 10 * np.log10(np.abs(np.fft.fftshift(tout) / tout[0])))
        ax.plot(sfreqs, 10 * np.log10(np.abs(np.fft.fftshift(taper))))
        ax.plot(sfreqs, 10 * np.log10(np.abs(0.0395 * np.sinc(0.0395 * sfreqs))))
        ax.plot(sfreqs, 10 * np.log10(np.abs(0.0472 * np.sinc(0.0472 * sfreqs))))
    axs[0, 0].set_ylabel("Norm amp")
    axs[1, 0].set_ylabel("Norm amp")
    axs[0, 1].set_ylabel("dB")
    axs[1, 1].set_ylabel("dB")
    for ax in axs.flatten():
        ax.set_xlabel(r"cycles/$\lambda$")
    plt.show()
In [55]:
#TRES convolution was done first this way
f = fl
wg_raw = wl
wg_fine = wl_FFT
# BUG FIX: this line was left as the incomplete statement `wg_fine_d =`
# (a SyntaxError).  wl_FFT is uniformly spaced by `min_v` km/s (computed in
# the grid cell above), so that is the fine-grid spacing to record here.
wg_fine_d = min_v
def resample_and_convolve(f, wg_raw, wg_fine, wg_coarse, wg_fine_d=0.35, sigma=2.89):
    '''Resample a full-resolution PHOENIX model spectrum and instrumentally
    broaden it in the Fourier domain.

    Parameters
    ----------
    f : fluxes sampled at `wg_raw`
    wg_raw : raw wavelength grid (need not be unique or sorted)
    wg_fine : uniform fine grid to resample onto before the FFT
        (needed because the original grid is not log-linear spaced)
    wg_coarse : coarse output grid
    wg_fine_d : spacing of `wg_fine` (presumably km/s, matching `sigma` --
        TODO confirm with caller)
    sigma : Gaussian broadening sigma in km/s (2.89 corresponds to a
        6.8 km/s FWHM)

    Returns the broadened flux sampled on `wg_coarse`.
    '''
    import gc  # BUG FIX: gc was referenced but never imported in this notebook

    # The spline requires strictly increasing, duplicate-free abscissae;
    # np.unique sorts and de-duplicates in one step.
    wl_sorted, ind = np.unique(wg_raw, return_index=True)
    fl_sorted = f[ind]
    interp_fine = InterpolatedUnivariateSpline(wl_sorted, fl_sorted)
    f_grid = interp_fine(wg_fine)
    # BUG FIX: fft/fftfreq/ifft were referenced unqualified (NameError with
    # only `numpy as np` imported); qualify them with np.fft.
    out = np.fft.fft(f_grid)
    #The frequencies (cycles/km) corresponding to each point
    freqs = np.fft.fftfreq(len(f_grid), d=wg_fine_d)
    # Multiplying by a Gaussian in Fourier space == convolving with a Gaussian PSF.
    taper = np.exp(-2 * (np.pi ** 2) * (sigma ** 2) * (freqs ** 2))
    tout = out * taper
    #Take the broadened spectrum back to wavelength space
    f_grid6 = np.fft.ifft(tout)
    #print("Total of imaginary components", np.sum(np.abs(np.imag(f_grid6))))
    # Discard the (tiny) imaginary residue, then resample to the coarse grid.
    interp_coarse = InterpolatedUnivariateSpline(wg_fine, np.abs(f_grid6))
    f_coarse = interp_coarse(wg_coarse)
    del interp_fine
    del interp_coarse
    gc.collect() #necessary to prevent memory leak!
    return f_coarse
In [56]:
#vsini was calculated this way
#Constant for all models
# NOTE(review): wave_grid and chunk are defined in cells not shown here; this
# cell assumes a 2 km/s-spaced wave_grid of length `chunk`.
ss = np.fft.fftfreq(len(wave_grid), d=2.) #2km/s spacing for wave_grid
# Pre-allocated, byte-aligned pyfftw buffers reused by every call to model().
f_full = pyfftw.n_byte_align_empty(chunk, 16, 'complex128')
FF = pyfftw.n_byte_align_empty(chunk, 16, 'complex128')
blended = pyfftw.n_byte_align_empty(chunk, 16, 'complex128')
blended_real = pyfftw.n_byte_align_empty(chunk, 16, "float64")
# Forward transform writes f_full -> FF; inverse writes FF -> blended.
fft_object = pyfftw.FFTW(f_full, FF)
ifft_object = pyfftw.FFTW(FF, blended, direction='FFTW_BACKWARD')
def model(wlsz, temp, logg, Z, vsini, Av, flux_factor):
    '''Given parameters, return the model, exactly sliced to match the format of the echelle spectra in `efile`.
    `temp` is effective temperature of photosphere, vsini in km/s, Av an
    extinction parameter, and flux_factor (= R**2/d**2) scales surface flux
    to received flux.

    Mutates the module-level pyfftw buffers (f_full, FF, blended,
    blended_real) and the frequency array ss set up in the previous cell, and
    relies on flux(), wave_grid, red_grid and norder defined elsewhere.
    NOTE(review): `gc` is used but never imported in the cells shown, and
    ss[0] is overwritten with 0.01 below and never restored -- confirm no
    other cell reads ss[0] afterward.'''
    #wlsz has length norders
    #M = M * M_sun #g
    #R = R * R_sun #cm
    #d = d * pc #cm
    #logg = np.log10(G * M / R**2)
    #flux_factor = R**2/d**2 #prefactor by which to multiply model flux (at surface of star) to get recieved TRES flux
    #Loads the ENTIRE spectrum, not limited to a specific order
    f_full[:] = flux_factor * flux(temp, logg, Z)
    #f_full = flux_factor * flux(temp, logg, Z)
    #Take FFT of f_grid
    #FF = fft(f_full)
    fft_object()
    #How things are done now
    #Determine the stellar broadening kernel
    # ub = 2. * np.pi * self.vsini * self.ss
    # sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
    #set zeroth frequency to 1 separately (DC term)
    # sb[0] = 1.
    # taper[0] = 1. #should this really be set to 1?
    #institute velocity and instrumental taper
    # self.FF *= sb * taper
    #
    #
    ss[0] = 0.01 #junk so we don't get a divide by zero error
    # Fourier transform of the rotational broadening profile.
    ub = 2. * np.pi * vsini * ss
    sb = j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
    #set zeroth frequency to 1 separately (DC term)
    sb[0] = 1.
    FF[:] *= sb #institute velocity taper
    #FF *= sb
    #do ifft
    ifft_object()
    #blended_real = np.abs(ifft(FF))
    blended_real[:] = np.abs(blended) #remove tiny complex component
    #redden spectrum
    red = blended_real / 10 ** (0.4 * Av * red_grid)
    #red = blended_real
    #do synthetic photometry to compare to points
    f = InterpolatedUnivariateSpline(wave_grid, red)
    fresult = f(wlsz.flatten()) #do spline interpolation to TRES pixels
    result = np.reshape(fresult, (norder, -1))
    del f
    gc.collect() #necessary to prevent memory leak!
    return result