Code to create a merged table from several input tables. After comparing the tables (in this case the ones in the article of Andrews+13) and identifying the rows in common, we create a new table containing the columns of interest for those rows. There is also a function to calculate the mass of the disk from the continuum flux.
In [25]:
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy import units as u
from astropy.modeling.blackbody import blackbody_nu # only used in the commented-out alternative below; not available in recent astropy versions
Function to calculate the disk mass from the mm flux, and the corresponding error
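For reference, the relation as implemented in the function below (the error is simple linear propagation of the flux uncertainty):

$$ M_d = \frac{F_{\rm mm}\, d^2}{r\, \kappa_\nu\, B_\nu(T_d)} \;\; [M_\odot], \qquad \sigma_{M_d} = M_d\, \frac{\sigma_F}{F_{\rm mm}} $$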
In [26]:
def Mdisk(F,d,r,k,Td,lam):
    #Eq. 2 Andrews et al. 2013
    #F is the *mm* flux in *Jy*
    #d is the distance in *pc*
    #r is the dust-to-gas mass ratio (dimensionless)
    #k is the dust opacity in *cm^2/g*
    #Td is the dust temperature in *K*
    #lam is the wavelength in *mm*
    c=2.99792458e10 # speed of light in *cm/s*
    h=6.6260755e-27 # Planck constant in erg*s (cgs)
    kb=1.3806488e-16 # Boltzmann constant in erg/K (cgs)
    ni=c/(lam*0.1) #frequency in Hz
    B_ni=1e23*2*h*ni**3/c**2*(1./(np.exp(h*ni/kb/Td)-1)) # Planck function in *Jy* --> 1Jy=1e-23erg/s/cm^2/Hz
    # B_ni=1e23*blackbody_nu(ni, Td) # Planck function in *Jy* --> 1Jy=1e-23erg/s/cm^2/Hz
    # return np.log10(F)+2*np.log10(d*3.08568e18)-np.log10(r*k*1.988e33)-np.log10(B_ni) #log(disk mass/*Msol*)
    return F*(d*3.086e18)**2/k/1.988e33/r/B_ni #disk mass in *Msol*

def eMd(errF,F,Md):
    return Md*errF/F
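A quick sanity check of the two functions, with purely illustrative numbers (not taken from the tables):

# hypothetical example: a 100 mJy source at 140 pc, 20 K dust, k = 2.3 cm^2/g at 1.3 mm
Md_test = Mdisk(F=0.1, d=140., r=1., k=2.3, Td=20., lam=1.3)
print(Md_test)                   # disk mass in Msol
print(eMd(0.01, 0.1, Md_test))   # propagated error for an assumed 10 mJy flux uncertainty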
Read the tables, compare them, and remove the rows that are not in common.
In [27]:
Tab3=Table.read("Tab3_totbin.fit")
Tab2=Table.read('Tab2_totbin.fit')
Tab4=Table.read('Tab4_totbin.fit')
Tab2.remove_rows([75,206]) #remove the row of CIDA11 B and the row not present in table 4 (J04290068+2755033)
Tab3.remove_rows([66,76,134,146])
#remove the rows not present in table 2 (J04263055+2443558, J04335245+2612548, J04361030+2159364)
#and in table 4 (J04290068+2755033)
Tab4.remove_rows([8,10,31,34,76,94,103,110,118,125,126,174,180,195,204,220])
#remove the close-pair companion stars (MHO2B, MHO3B, V892TAUB, DFTAUB, ZZTAUB, V710TAUB, GGTAUAb, UZTauEb,
#V807TauBa, V807TauBb, J04403979+2519061B, CoKuTau/4B, DQTauB, St34B, CIDA11B)
name=Tab2['Name'] #source names
lF=Tab2['l_F1_3'] #upper-limit flag
F1_3=Tab2["F1_3"] #mm flux at 1.3mm
eF1_3=Tab2['e_F1_3'] #error on the mm flux
SpT=Tab2["SpT"] #stellar spectral type
logM=Tab3["logM_"] #stellar host mass
b_logMlo=Tab3["b_logM_"] #lower bound of the 1sigma confidence range
b_logMup=Tab3["b_logm__lc"] #upper bound of the 1sigma confidence range
logL=Tab4['logL_'] #stellar host luminosity
#logT=Tab4['logT_'] #stellar host temperature
logM2=Tab3["logM_2"] #stellar host mass (second estimate)
b_logMlo2=Tab3["b_logM_2"] #lower bound of the 1sigma confidence range
b_logMup2=Tab3["b_logm_2_lc"] #upper bound of the 1sigma confidence range
logM3=Tab3["logM_3"] #stellar host mass (third estimate)
b_logMlo3=Tab3["b_logM_3"] #lower bound of the 1sigma confidence range
b_logMup3=Tab3["b_logm_3_lc"] #upper bound of the 1sigma confidence range
##### Error on upper limits
nnan=np.where(np.isnan(eF1_3))
eF1_3[nnan]=F1_3[nnan]/3. #assume the quoted upper limit is 3sigma, so take F/3 as the 1sigma error
#eF1_3[nnan]=1e-20
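As an aside, a more automatic way to keep only the sources in common would be an inner join on the source name, instead of removing hard-coded row indices as above. This is only a sketch: it assumes Tab2, Tab3 and Tab4 all carry a matching 'Name' column, which may not hold in the actual FITS files, and duplicated column names would get suffixes from astropy.

from astropy.table import join
common = join(Tab2, Tab3, keys='Name', join_type='inner')   # keep only sources present in both
common = join(common, Tab4, keys='Name', join_type='inner') # then intersect with Tab4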
Calculate the Disk Mass with the function above
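The disk temperature adopted in the next cell scales with the stellar luminosity, with a 10 K floor; a fixed 20 K case is also computed:

$$ T_d = 25\,\mathrm{K}\,\left(\frac{L_*}{L_\odot}\right)^{1/4}, \qquad T_d \geq 10\,\mathrm{K}, \qquad T_{d,20} = 20\,\mathrm{K} $$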
In [45]:
d=140 #distance of Taurus in pc
r=1 #dust-to-gas mass ratio
k=2.3 #cm^2/g at 1.3mm
lam=1.33 #wavelength in mm
L=10.**logL
L_true=np.delete(L,(39,75,95,117,148,165)) #all sources except the six replaced below
L[39]=L[75]=L[95]=L[117]=L[148]=L[165]=np.mean(L_true) #assign these six sources the mean L of the others
#Temperature of the disk
T_20=20 #K
Td=25.*(L)**(1./4.)
tmin = 10. ###minimum value of T in K
ncold = np.where(Td <tmin)
Td[ncold] = tmin
Md=Mdisk(F1_3,d,r,k,Td,lam)
Md_20=Mdisk(F1_3,d,r,k,T_20,lam)/3e-6 #Md in Earth masses (1 Mearth ~ 3e-6 Msol)
logMd=np.log10(Md)
logMd_20=np.log10(Md_20)
calibF1_3=np.sqrt(eF1_3**2+(0.1*F1_3)**2) ##flux error and 10% calibration error, added in quadrature
e_Md=eMd(calibF1_3,F1_3,Md) #error on Md
e_Md_20=eMd(calibF1_3,F1_3,Md_20) #error on Md_20
add_Md=np.sqrt(e_Md**2+(0.15*Md)**2) ##error on Md and an additional 15% error for the uncertainty on the assumed distance, added in quadrature
add_Md_20=np.sqrt(e_Md_20**2+(0.15*Md_20)**2) ##error on Md_20 and an additional 15% error for the uncertainty on the assumed distance, added in quadrature
###Errors on Md in log scale
Dp=np.log10(Md+add_Md)-logMd #upper error bar
Dn=logMd-np.log10(Md-add_Md) #lower error bar
Dp_20=np.log10(Md_20+add_Md_20)-logMd_20 #upper error bar
Dn_20=logMd_20-np.log10(Md_20-add_Md_20) #lower error bar
#plt.errorbar((Td),(L), fmt='s')
#plt.xscale('log')
#plt.yscale('log')
#plt.show()
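Written out, the error budget computed in the cell above is:

$$ \sigma_{F,\mathrm{cal}} = \sqrt{\sigma_F^2 + (0.1\,F_{\rm mm})^2}, \qquad \sigma_{M_d,\mathrm{tot}} = \sqrt{\sigma_{M_d}^2 + (0.15\,M_d)^2} $$

$$ \Delta^{+} = \log_{10}(M_d + \sigma_{M_d,\mathrm{tot}}) - \log_{10} M_d, \qquad \Delta^{-} = \log_{10} M_d - \log_{10}(M_d - \sigma_{M_d,\mathrm{tot}}) $$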
In [46]:
T=Table([name,logM,b_logMlo,b_logMup,lF,logMd, Dp, Dn, logMd_20, Dp_20, Dn_20 ,F1_3,eF1_3,calibF1_3,SpT],
names=('Name','LogM*','bM*_lo','bM*_up','l','LogMd', 'Dp', 'Dn' ,'LogMd_20', 'Dp_20', 'Dn_20',
'F1_3','eF1_3','calibF','SpT'))
#T['LogMd_20'].unit='Mearth'
S=Table([name,logM2,b_logMlo2,b_logMup2,logM3,b_logMlo3,b_logMup3],
names=('Name','LogM*2','bM*_lo2','bM*_up2','LogM*3','bM*_lo3','bM*_up3'))
##### Errors for upper limits
#nnan=np.where(np.isnan(T['eF1_3']))
#T['eF1_3'][nnan]=1e-20
#T['Dp'].fill_value=0
#T['Dn'].fill_value=0
#T_new=T.filled()
#T_new.show_in_browser()
#T_new.show_in_notebook()
Write the new tables to FITS files, and read the main one back.
In [48]:
T.write('Table.fit', format='fits', overwrite=True)
S.write('Table_Mass.fit', format='fits', overwrite=True)
New=Table.read("Table.fit")
#New.info()
#New.colnames
New.show_in_browser()
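A quick way to inspect a few rows without the browser view (an illustrative snippet; the column names follow the table definition above):

New[['Name', 'LogM*', 'LogMd', 'LogMd_20']][:5]   # first five rows of a few columns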