ML match for LOFAR and the combined PanSTARRS WISE catalogue: Source catalogue

Configuration

Load libraries and setup


In [1]:
import numpy as np
from astropy.table import Table
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
from IPython.display import clear_output
import pickle
import os
from glob import glob
from shutil import copyfile

In [2]:
from mltier1 import (get_center, get_n_m, estimate_q_m, Field, SingleMLEstimatorU, MultiMLEstimatorU,
                     parallel_process, get_sigma_all, get_q_m, get_threshold, q0_min_level, q0_min_numbers,
                     get_n_m_kde, estimate_q_m_kde, get_q_m_kde, describe)

In [3]:
%load_ext autoreload

In [4]:
%autoreload

In [5]:
%pylab inline


Populating the interactive namespace from numpy and matplotlib

General configuration


In [6]:
save_intermediate = True
plot_intermediate = True

In [7]:
idp = "idata/main_pdf_v0.8-b"

In [8]:
if not os.path.isdir(idp):
    os.makedirs(idp)

Area limits


In [9]:
ra_down0 = 170.0
ra_up0 = 190.0
dec_down0 = 45.5
dec_up0 = 56.5

In [10]:
# Busy week Edinburgh 2017
ra_down = 172.09
ra_up = 187.5833
dec_down = 46.106
dec_up = 56.1611

In [11]:
# Busy week Hatfield 2017
ra_down = 170.
ra_up = 190.
dec_down = 46.8
dec_up = 55.9

In [12]:
field = Field(170.0, 190.0, 46.8, 55.9)

In [13]:
field_full = Field(160.0, 232.0, 42.0, 62.0)

Load data


In [14]:
combined_all = Table.read("pw.fits")

We will start to use the updated catalogues that include the output of the LOFAR Galaxy Zoo work.


In [15]:
#lofar_all = Table.read("data/LOFAR_HBA_T1_DR1_catalog_v0.9.srl.fits")
lofar_all = Table.read("data/LOFAR_HBA_T1_DR1_merge_ID_optical_v0.8.fits")

In [16]:
np.array(combined_all.colnames)


Out[16]:
array(['AllWISE', 'objID', 'ra', 'dec', 'raErr', 'decErr', 'W1mag',
       'W1magErr', 'i', 'iErr'], dtype='<U8')

In [17]:
np.array(lofar_all.colnames)


Out[17]:
array(['Source_Name', 'RA', 'E_RA', 'DEC', 'E_DEC', 'Peak_flux',
       'E_Peak_flux', 'Total_flux', 'E_Total_flux', 'Maj', 'E_Maj', 'Min',
       'E_Min', 'DC_Maj', 'E_DC_Maj', 'DC_Min', 'E_DC_Min', 'PA', 'E_PA',
       'DC_PA', 'E_DC_PA', 'Isl_rms', 'S_Code', 'Mosaic_ID',
       'Masked_Fraction', 'ID_flag', 'ID_name', 'ID_ra', 'ID_dec',
       'ML_LR', 'LGZ_Size', 'LGZ_Width', 'LGZ_PA', 'LGZ_Assoc',
       'LGZ_Assoc_Qual', 'LGZ_ID_Qual', 'AllWISE', 'objID', 'gFApFlux',
       'gFApFluxErr', 'gFApMag', 'gFApMagErr', 'rFApFlux', 'rFApFluxErr',
       'rFApMag', 'rFApMagErr', 'iFApFlux', 'iFApFluxErr', 'iFApMag',
       'iFApMagErr', 'zFApFlux', 'zFApFluxErr', 'zFApMag', 'zFApMagErr',
       'yFApFlux', 'yFApFluxErr', 'yFApMag', 'yFApMagErr', 'w1Flux',
       'w1FluxErr', 'w1Mag', 'w1MagErr', 'w2Flux', 'w2FluxErr', 'w2Mag',
       'w2MagErr', 'w3Flux', 'w3FluxErr', 'w3Mag', 'w3MagErr', 'w4Flux',
       'w4FluxErr', 'w4Mag', 'w4MagErr', 'XrayClass', '2RXS_ID',
       'XMMSL2_ID', 'IRClass', 'z_spec', 'z_source', 'specAGN', 'mqcAGN',
       'EBV', 'objName', 'z_best', 'z_best_source', 'z1_median', 'z1_min',
       'z1_max', 'z1_area', 'z2_median', 'z2_min', 'z2_max', 'z2_area',
       'chi_r_eazy', 'chi_r_atlas', 'chi_r_cosmos', 'chi_r_stellar',
       'stellar_type', 'z_gpz', 'z_gpz_err'], dtype='<U15')

We show here the "ID_flag" codes used and the number of sources with each code.


In [20]:
codes, counts = np.unique(lofar_all['ID_flag'], return_counts=True)

In [19]:
print(list(zip(codes.filled(), counts)))


[(1, 300135), (2, 974), (4, 3410), (311, 10023), (312, 1675), (600, 645), (601, 69), (602, 1592), (603, 204), (610, 42)]

The columns of the original catalogue were the following: array(['Source_Name', 'RA', 'E_RA', 'E_RA_tot', 'DEC', 'E_DEC', 'E_DEC_tot', 'Peak_flux', 'E_Peak_flux', 'E_Peak_flux_tot', 'Total_flux', 'E_Total_flux', 'E_Total_flux_tot', 'Maj', 'E_Maj', 'Min', 'E_Min', 'PA', 'E_PA', 'Isl_rms', 'S_Code', 'Mosaic_ID', 'Isl_id'], dtype='<U16')


In [21]:
describe(lofar_all['Maj'])


8.148 +/- 2.671; median: 7.387; limits: [0.715, 84.092]; N=318769 (12984 NaN; 0 masked)
/disk2/jsm/prog/anaconda3/lib/python3.6/site-packages/numpy/lib/function_base.py:4147: UserWarning: Warning: 'partition' will ignore the 'mask' of the MaskedColumn.
  part.partition(kth)

Filter catalogues

We take the sources in the main region and discard those with a major-axis size larger than 30 arcsec. We also discard all the sources that are not classified with code 1 in "ID_flag"; in other words, we only keep the sources marked as "ML".


In [22]:
lofar_aux = lofar_all[~np.isnan(lofar_all['Maj'])]

In [23]:
lofar = field.filter_catalogue(lofar_aux[(lofar_aux['Maj'] < 30.) &
                                         (lofar_aux['ID_flag'] == 1)], 
                               colnames=("RA", "DEC"))

In [24]:
lofar_full = field_full.filter_catalogue(lofar_aux[(lofar_aux['Maj'] < 30.) &
                                                   (lofar_aux['ID_flag'] == 1)], 
                                         colnames=("RA", "DEC"))

In [25]:
combined = field.filter_catalogue(combined_all, 
                               colnames=("ra", "dec"))

Additional data


In [26]:
combined["colour"] = combined["i"] - combined["W1mag"]

In [27]:
combined_aux_index = np.arange(len(combined))

Sky coordinates


In [28]:
coords_combined = SkyCoord(combined['ra'], 
                           combined['dec'], 
                           unit=(u.deg, u.deg), 
                           frame='icrs')

In [29]:
coords_lofar = SkyCoord(lofar['RA'], 
                       lofar['DEC'], 
                       unit=(u.deg, u.deg), 
                       frame='icrs')

Class of sources in the combined catalogue

The sources are grouped depending on the available photometric data.


In [30]:
combined_matched = (~np.isnan(combined["i"]) & ~np.isnan(combined["W1mag"])) # Matched i-W1 sources

In [31]:
combined_panstarrs = (~np.isnan(combined["i"]) & np.isnan(combined["W1mag"])) # Sources with only i-band

In [32]:
combined_wise =(np.isnan(combined["i"]) & ~np.isnan(combined["W1mag"])) # Sources with only W1-band

In [33]:
combined_i = combined_matched | combined_panstarrs
combined_w1 = combined_matched | combined_wise
#combined_only_i = combined_panstarrs & ~combined_matched
#combined_only_w1 = combined_wise & ~combined_matched

In [34]:
print("Total    - ", len(combined))
print("i and W1 - ", np.sum(combined_matched))
print("Only i   - ", np.sum(combined_panstarrs))
print("With i   - ", np.sum(combined_i))
print("Only W1  - ", np.sum(combined_wise))
print("With W1  - ", np.sum(combined_w1))


Total    -  3557679
i and W1 -  1109412
Only i   -  1778426
With i   -  2887838
Only W1  -  669839
With W1  -  1779251

Colour categories

The colour categories will be used after the first ML match.


In [35]:
colour_limits = [0.0, 0.5, 1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0, 3.5, 4.0]

In [36]:
# Start with the W1-only, i-only and "less than lower colour" bins
colour_bin_def = [{"name":"only W1", "condition": combined_wise},
                  {"name":"only i", "condition": combined_panstarrs},
                  {"name":"-inf to {}".format(colour_limits[0]), 
                   "condition": (combined["colour"] < colour_limits[0])}]

# Get the colour bins
for i in range(len(colour_limits)-1):
    name = "{} to {}".format(colour_limits[i], colour_limits[i+1])
    condition = ((combined["colour"] >= colour_limits[i]) & 
                 (combined["colour"] < colour_limits[i+1]))
    colour_bin_def.append({"name":name, "condition":condition})

# Add the "more than higher colour" bin
colour_bin_def.append({"name":"{} to inf".format(colour_limits[-1]), 
                       "condition": (combined["colour"] >= colour_limits[-1])})


/disk2/jsm/prog/anaconda3/lib/python3.6/site-packages/astropy/table/column.py:954: RuntimeWarning: invalid value encountered in less
  return getattr(self.data, op)(other)
/disk2/jsm/prog/anaconda3/lib/python3.6/site-packages/astropy/table/column.py:954: RuntimeWarning: invalid value encountered in greater_equal
  return getattr(self.data, op)(other)

In [37]:
combined["category"] = np.nan
for i in range(len(colour_bin_def)):
    combined["category"][colour_bin_def[i]["condition"]] = i

In [38]:
np.sum(np.isnan(combined["category"]))


Out[38]:
2

We get the number of sources of the combined catalogue in each colour category. It will be used at a later stage to compute the $Q_0$ values.


In [39]:
numbers_combined_bins = np.array([np.sum(a["condition"]) for a in colour_bin_def])

In [40]:
numbers_combined_bins


Out[40]:
array([ 669839, 1778426,  123221,   86953,  127155,   91489,  106076,
        112638,  109949,   98183,   81055,   61913,   44390,   46507,
         14347,    5536])

Description

Sky coverage


In [41]:
plt.rcParams["figure.figsize"] = (15,5)
plot(lofar_all["RA"],
     lofar_all["DEC"],
     ls="", marker=",");



In [42]:
plt.rcParams["figure.figsize"] = (15,5)
plot(lofar_full["RA"],
     lofar_full["DEC"],
     ls="", marker=",");



In [43]:
plt.rcParams["figure.figsize"] = (8,5)
plot(lofar["RA"],
     lofar["DEC"],
     ls="", marker=",");



In [44]:
len(lofar)


Out[44]:
91332

Summary of galaxy types in the combined catalogue


In [45]:
np.sum(combined_matched) # Matches


Out[45]:
1109412

In [46]:
plt.rcParams["figure.figsize"] = (15,5)
subplot(1,3,1)
hist(combined["i"][combined_matched], bins=50)
xlabel("i")
subplot(1,3,2)
hist(combined["W1mag"][combined_matched], bins=50)
xlabel("W1")
subplot(1,3,3)
hist((combined["i"] - combined["W1mag"])[combined_matched], bins=50)
xlabel("(i - W1)");



In [47]:
np.sum(combined_panstarrs) # Only PanSTARSS


Out[47]:
1778426

In [48]:
plt.rcParams["figure.figsize"] = (15,5)
subplot(1,3,1)
hist(combined["i"][combined_panstarrs], bins=50)
hist(combined["i"][combined_matched], bins=50, alpha=0.4)
xlabel("i")
subplot(1,3,2)
hist((combined["i"] - 17.4)[combined_matched], bins=50)
hist((combined["i"] - combined["W1mag"])[combined_matched], bins=50, alpha=0.4)
xlabel("(i - W1) (W1 lim. = 17.4)")
subplot(1,3,3)
hist((combined["i"] - 18.4)[combined_matched], bins=50)
hist((combined["i"] - combined["W1mag"])[combined_matched], bins=50, alpha=0.4)
xlabel("(i - W1) (W1 lim. = 18.4)");



In [49]:
np.sum(combined_wise) # Only WISE


Out[49]:
669839

In [50]:
plt.rcParams["figure.figsize"] = (15,5)
subplot(1,3,1)
hist(combined["W1mag"][combined_wise], bins=50)
hist(combined["W1mag"][combined_matched], bins=50, alpha=0.4)
xlabel("W1")
subplot(1,3,2)
hist((22 - combined["W1mag"])[combined_wise], bins=50)
hist((combined["i"] - combined["W1mag"])[combined_matched], bins=50, alpha=0.4)
xlabel("(i - W1) (i lim. = 22)")
subplot(1,3,3)
hist((23 - combined["W1mag"])[combined_wise], bins=50)
hist((combined["i"] - combined["W1mag"])[combined_matched], bins=50, alpha=0.4)
xlabel("(i - W1) (i lim. = 23)");


Maximum Likelihood 1st

i-band preparation


In [51]:
bandwidth_i = 0.5

In [52]:
catalogue_i = combined[combined_i]

In [53]:
bin_list_i = np.linspace(12., 30., 361) # Bins of 0.05

In [54]:
center_i = get_center(bin_list_i)

In [55]:
n_m_i1 = get_n_m(catalogue_i["i"], bin_list_i, field.area)

In [56]:
n_m_i = get_n_m_kde(catalogue_i["i"], center_i, field.area, bandwidth=bandwidth_i)

In [57]:
n_m_i_cs = np.cumsum(n_m_i)

In [58]:
plt.rcParams["figure.figsize"] = (5,5)
plot(center_i, n_m_i1);
plot(center_i, n_m_i_cs);



In [59]:
q_m_i1 = estimate_q_m(catalogue_i["i"], 
                      bin_list_i, 
                      n_m_i1, 
                      coords_lofar, 
                      coords_combined[combined_i], 
                      radius=5)

In [60]:
q_m_i = estimate_q_m_kde(catalogue_i["i"], 
                      center_i, 
                      n_m_i, 
                      coords_lofar, 
                      coords_combined[combined_i], 
                      radius=5, 
                      bandwidth=bandwidth_i)

In [61]:
q_m_i_cs = np.cumsum(q_m_i)

In [62]:
plt.rcParams["figure.figsize"] = (5,5)
plot(center_i, q_m_i1);
plot(center_i, q_m_i_cs);



In [63]:
plt.rcParams["figure.figsize"] = (5,5)
plot(center_i, q_m_i1/n_m_i1);
plot(center_i, q_m_i/n_m_i);




W1-band preparation


In [64]:
bandwidth_w1 = 0.5

In [65]:
catalogue_w1 = combined[combined_w1]

In [66]:
bin_list_w1 = np.linspace(10., 23., 261) # Bins of 0.05

In [67]:
center_w1 = get_center(bin_list_w1)

In [68]:
n_m_w11 = get_n_m(catalogue_w1["W1mag"], bin_list_w1, field.area)

In [69]:
n_m_w1 = get_n_m_kde(catalogue_w1["W1mag"], center_w1, field.area, bandwidth=bandwidth_w1)

In [70]:
plt.rcParams["figure.figsize"] = (5,5)
plot(center_w1, n_m_w11);
plot(center_w1, np.cumsum(n_m_w1));



In [71]:
q_m_w11 = estimate_q_m(catalogue_w1["W1mag"], 
                      bin_list_w1, 
                      n_m_w11, coords_lofar, 
                      coords_combined[combined_w1], 
                      radius=5)

In [72]:
q_m_w1 = estimate_q_m_kde(catalogue_w1["W1mag"], 
                      center_w1, 
                      n_m_w1, coords_lofar, 
                      coords_combined[combined_w1], 
                      radius=5, 
                      bandwidth=bandwidth_w1)

In [73]:
plot(center_w1, q_m_w11);
plot(center_w1, np.cumsum(q_m_w1));



In [74]:
plot(center_w1, q_m_w11/n_m_w11);
plot(center_w1, q_m_w1/n_m_w1);




$Q_0$ and likelihood estimators
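
For reference, the likelihood ratio computed by the estimators below follows the standard Sutherland & Saunders (1992) form; this is only a sketch of the usual definition, the exact uncertainty-aware implementation being the one in mltier1 (SingleMLEstimatorU and, later, MultiMLEstimatorU):

$$\mathrm{LR} = \frac{q(m)\,f(r)}{n(m)}, \qquad f(r) = \frac{1}{2\pi\sigma_{\rm maj}\sigma_{\rm min}}\exp\left(-\frac{r_{\rm maj}^2}{2\sigma_{\rm maj}^2}-\frac{r_{\rm min}^2}{2\sigma_{\rm min}^2}\right),$$

where $q(m)$ is the expected magnitude distribution of the true counterparts (normalised to $Q_0$), $n(m)$ is the surface density of background sources as a function of magnitude, and $r_{\rm maj}$, $r_{\rm min}$ are the radio-optical offsets projected along the radio major and minor axes, with $\sigma_{\rm maj}$, $\sigma_{\rm min}$ combining the LOFAR and optical/IR positional uncertainties.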


In [75]:
# Old
Q0_i = 0.545
Q0_w1 = 0.77
# Better Q0
Q0_i = 0.503
Q0_w1 = 0.699
# Cleaned v0.7 Q0
Q0_i = 0.518
Q0_w1 = 0.713
# Cleaned v0.8 Q0 - 4 arcsecs
Q0_i = 0.512
Q0_w1 = 0.700

In [76]:
likelihood_ratio_i = SingleMLEstimatorU(Q0_i, n_m_i, q_m_i, center_i)

In [77]:
likelihood_ratio_w1 = SingleMLEstimatorU(Q0_w1, n_m_w1, q_m_w1, center_w1)

We will get the number of CPUs to use for the parallel computations.


In [78]:
import multiprocessing

In [79]:
n_cpus_total = multiprocessing.cpu_count()

In [80]:
n_cpus = max(1, n_cpus_total-1)

i-band match


In [81]:
radius = 15

In [82]:
idx_lofar, idx_i, d2d, d3d = search_around_sky(
    coords_lofar, coords_combined[combined_i], radius*u.arcsec)

In [83]:
idx_lofar_unique = np.unique(idx_lofar)

In [84]:
lofar["lr_i"] = np.nan                   # Likelihood ratio
lofar["lr_dist_i"] = np.nan              # Distance to the selected source
lofar["lr_index_i"] = np.nan             # Index of the PanSTARRS source in combined

In [85]:
total_sources = len(idx_lofar_unique)
combined_aux_index = np.arange(len(combined))

In [86]:
def ml(i):
    idx_0 = idx_i[idx_lofar == i]
    d2d_0 = d2d[idx_lofar == i]
    i_mag = catalogue_i["i"][idx_0]
    
    lofar_ra = lofar[i]["RA"]
    lofar_dec = lofar[i]["DEC"]
    lofar_pa = lofar[i]["PA"]
    lofar_maj_err = lofar[i]["E_Maj"]
    lofar_min_err = lofar[i]["E_Min"]
    c_ra = catalogue_i["ra"][idx_0]
    c_dec = catalogue_i["dec"][idx_0]
    c_ra_err = catalogue_i["raErr"][idx_0]
    c_dec_err = catalogue_i["decErr"][idx_0]
    
    sigma, sigma_maj, sigma_min = get_sigma_all(lofar_maj_err, lofar_min_err, lofar_pa, 
                      lofar_ra, lofar_dec, 
                      c_ra, c_dec, c_ra_err, c_dec_err)
    
    lr_0 = likelihood_ratio_i(i_mag, d2d_0.arcsec, sigma, sigma_maj, sigma_min)
    chosen_index = np.argmax(lr_0)
    result = [combined_aux_index[combined_i][idx_0[chosen_index]], # Index
              (d2d_0.arcsec)[chosen_index],                        # distance
              lr_0[chosen_index]]                                  # LR
    return result

In [87]:
res = parallel_process(idx_lofar_unique, ml, n_jobs=n_cpus)


100%|██████████| 79.9k/79.9k [04:28<00:00, 297it/s]
100%|██████████| 79869/79869 [00:00<00:00, 378653.20it/s]

In [88]:
(lofar["lr_index_i"][idx_lofar_unique], 
 lofar["lr_dist_i"][idx_lofar_unique], 
 lofar["lr_i"][idx_lofar_unique]) = list(map(list, zip(*res)))

Threshold and selection for i-band


In [89]:
lofar["lr_i"][np.isnan(lofar["lr_i"])] = 0

In [90]:
threshold_i = np.percentile(lofar["lr_i"], 100*(1 - Q0_i))

In [91]:
threshold_i #4.8 before


Out[91]:
4.000314360889485

In [92]:
plt.rcParams["figure.figsize"] = (15,6)
subplot(1,2,1)
hist(lofar[lofar["lr_i"] != 0]["lr_i"], bins=200)
vlines([threshold_i], 0, 1000)
ylim([0,1000])
subplot(1,2,2)
hist(np.log10(lofar[lofar["lr_i"] != 0]["lr_i"]+1), bins=200)
vlines(np.log10(threshold_i+1), 0, 1000)
ticks, _ = xticks()
xticks(ticks, ["{:.1f}".format(10**t-1) for t in ticks])
ylim([0,1000]);



In [93]:
lofar["lr_index_sel_i"] = lofar["lr_index_i"]
lofar["lr_index_sel_i"][lofar["lr_i"] < threshold_i] = np.nan

W1-band match


In [94]:
idx_lofar, idx_i, d2d, d3d = search_around_sky(
    coords_lofar, coords_combined[combined_w1], radius*u.arcsec)

In [95]:
idx_lofar_unique = np.unique(idx_lofar)

In [96]:
lofar["lr_w1"] = np.nan                   # Likelihood ratio
lofar["lr_dist_w1"] = np.nan              # Distance to the selected source
lofar["lr_index_w1"] = np.nan             # Index of the PanSTARRS source in combined

In [97]:
def ml_w1(i):
    idx_0 = idx_i[idx_lofar == i]
    d2d_0 = d2d[idx_lofar == i]
    w1_mag = catalogue_w1["W1mag"][idx_0]
    
    lofar_ra = lofar[i]["RA"]
    lofar_dec = lofar[i]["DEC"]
    lofar_pa = lofar[i]["PA"]
    lofar_maj_err = lofar[i]["E_Maj"]
    lofar_min_err = lofar[i]["E_Min"]
    c_ra = catalogue_w1["ra"][idx_0]
    c_dec = catalogue_w1["dec"][idx_0]
    c_ra_err = catalogue_w1["raErr"][idx_0]
    c_dec_err = catalogue_w1["decErr"][idx_0]
    
    sigma, sigma_maj, sigma_min = get_sigma_all(lofar_maj_err, lofar_min_err, lofar_pa, 
                      lofar_ra, lofar_dec, 
                      c_ra, c_dec, c_ra_err, c_dec_err)
    
    lr_0 = likelihood_ratio_w1(w1_mag, d2d_0.arcsec, sigma, sigma_maj, sigma_min)
    chosen_index = np.argmax(lr_0)
    result = [combined_aux_index[combined_w1][idx_0[chosen_index]], # Index
              (d2d_0.arcsec)[chosen_index],                        # distance
              lr_0[chosen_index]]                                  # LR
    return result

In [98]:
res = parallel_process(idx_lofar_unique, ml_w1, n_jobs=n_cpus)


100%|██████████| 83.7k/83.7k [07:15<00:00, 192it/s]
100%|██████████| 83719/83719 [00:00<00:00, 378953.99it/s]

In [99]:
(lofar["lr_index_w1"][idx_lofar_unique], 
 lofar["lr_dist_w1"][idx_lofar_unique], 
 lofar["lr_w1"][idx_lofar_unique]) = list(map(list, zip(*res)))

Threshold and selection for W1 band


In [100]:
lofar["lr_w1"][np.isnan(lofar["lr_w1"])] = 0

In [101]:
threshold_w1 = np.percentile(lofar["lr_w1"], 100*(1 - Q0_w1))

In [102]:
threshold_w1 # 0.695 before


Out[102]:
0.6531932089626912

In [105]:
plt.rcParams["figure.figsize"] = (15,6)
subplot(1,2,1)
hist(lofar[lofar["lr_w1"] != 0]["lr_w1"], bins=200)
vlines([threshold_w1], 0, 1000)
ylim([0,1000])
subplot(1,2,2)
hist(np.log10(lofar[lofar["lr_w1"] != 0]["lr_w1"]+1), bins=200)
vlines(np.log10(threshold_w1+1), 0, 1000)
ticks, _ = xticks()
xticks(ticks, ["{:.1f}".format(10**t-1) for t in ticks])
ylim([0,1000]);



In [104]:
lofar["lr_index_sel_w1"] = lofar["lr_index_w1"]
lofar["lr_index_sel_w1"][lofar["lr_w1"] < threshold_w1] = np.nan

Final selection of the match

We combine the ML matching done in the i-band and in the W1-band. All the galaxies where the LR is above the selection threshold for the respective band are finally selected; when both bands give an accepted match, the counterpart from the band with the higher LR is kept.


In [106]:
# lr_i_and_w1 = (lofar["lr_i"] != 0) & (lofar["lr_w1"] != 0)
# lr_only_i = (lofar["lr_i"] != 0) & (lofar["lr_w1"] == 0)
# lr_only_w1 = (lofar["lr_i"] == 0) & (lofar["lr_w1"] != 0)
# lr_no_match = (lofar["lr_i"] == 0) & (lofar["lr_w1"] == 0)
lr_i_and_w1 = ~np.isnan(lofar["lr_index_sel_i"]) & ~np.isnan(lofar["lr_index_sel_w1"])
lr_only_i = ~np.isnan(lofar["lr_index_sel_i"]) & np.isnan(lofar["lr_index_sel_w1"])
lr_only_w1 = np.isnan(lofar["lr_index_sel_i"]) & ~np.isnan(lofar["lr_index_sel_w1"])
lr_no_match = np.isnan(lofar["lr_index_sel_i"]) & np.isnan(lofar["lr_index_sel_w1"])

In [107]:
print(np.sum(lr_i_and_w1))
print(np.sum(lr_only_i))
print(np.sum(lr_only_w1))
print(np.sum(lr_no_match))


43470
3292
20462
24108

In [105]:
print(np.sum(lr_i_and_w1))
print(np.sum(lr_only_i))
print(np.sum(lr_only_w1))
print(np.sum(lr_no_match))


43425
3337
20507
24063

In [108]:
lofar["lr_index_1"] = np.nan
lofar["lr_dist_1"] = np.nan
lofar["lr_1"] = np.nan
lofar["lr_type_1"] = 0

In [109]:
# Only i matches
lofar["lr_1"][lr_only_i] = lofar["lr_i"][lr_only_i]
lofar["lr_index_1"][lr_only_i] = lofar["lr_index_i"][lr_only_i]
lofar["lr_dist_1"][lr_only_i] = lofar["lr_dist_i"][lr_only_i]
lofar["lr_type_1"][lr_only_i] = 1

# Only w1 matches
lofar["lr_1"][lr_only_w1] = lofar["lr_w1"][lr_only_w1]
lofar["lr_index_1"][lr_only_w1] = lofar["lr_index_w1"][lr_only_w1]
lofar["lr_dist_1"][lr_only_w1] = lofar["lr_dist_w1"][lr_only_w1]
lofar["lr_type_1"][lr_only_w1] = 2

# Both matches: keep the counterpart from the band with the higher LR (type 1: i-band, type 2: W1-band)
lofar["lr_1"][lr_i_and_w1] = np.max([lofar["lr_i"][lr_i_and_w1], lofar["lr_w1"][lr_i_and_w1]], axis=0)
lofar["lr_type_1"][lr_i_and_w1] = np.argmax([lofar["lr_i"][lr_i_and_w1], lofar["lr_w1"][lr_i_and_w1]], axis=0) + 1

c1 = (lofar["lr_type_1"] == 1)
c2 = (lofar["lr_type_1"] == 2)
lofar["lr_index_1"][lr_i_and_w1 & c1] = lofar["lr_index_i"][lr_i_and_w1 & c1]
lofar["lr_index_1"][lr_i_and_w1 & c2] = lofar["lr_index_w1"][lr_i_and_w1 & c2]
lofar["lr_dist_1"][lr_i_and_w1 & c1] = lofar["lr_dist_i"][lr_i_and_w1 & c1]
lofar["lr_dist_1"][lr_i_and_w1 & c2] = lofar["lr_dist_w1"][lr_i_and_w1 & c2]

Summary of the number of source matches of each type


In [110]:
print("match    sel-i: ", np.sum(lofar["lr_type_1"][lr_i_and_w1] == 1))
print("match   sel-W1: ", np.sum(lofar["lr_type_1"][lr_i_and_w1] == 2))
print("match     both: ", np.sum(lofar["lr_type_1"][lr_i_and_w1] == 1) + 
                          np.sum(lofar["lr_type_1"][lr_i_and_w1] == 2))
print("match   i-only: ", np.sum(lofar["lr_type_1"] == 1) - np.sum(lofar["lr_type_1"][lr_i_and_w1] == 1))
print("match  W1-only: ", np.sum(lofar["lr_type_1"] == 2) - np.sum(lofar["lr_type_1"][lr_i_and_w1] == 2))
print("match      all: ", np.sum(lofar["lr_type_1"] == 1) + 
                          np.sum(lofar["lr_type_1"] == 2))
print("         Total: ", len(lofar))


match    sel-i:  1723
match   sel-W1:  41747
match     both:  43470
match   i-only:  3292
match  W1-only:  20462
match      all:  67224
         Total:  91332

In [108]:
print("match    sel-i: ", np.sum(lofar["lr_type_1"][lr_i_and_w1] == 1))
print("match   sel-W1: ", np.sum(lofar["lr_type_1"][lr_i_and_w1] == 2))
print("match     both: ", np.sum(lofar["lr_type_1"][lr_i_and_w1] == 1) + 
                          np.sum(lofar["lr_type_1"][lr_i_and_w1] == 2))
print("match   i-only: ", np.sum(lofar["lr_type_1"] == 1) - np.sum(lofar["lr_type_1"][lr_i_and_w1] == 1))
print("match  W1-only: ", np.sum(lofar["lr_type_1"] == 2) - np.sum(lofar["lr_type_1"][lr_i_and_w1] == 2))
print("match      all: ", np.sum(lofar["lr_type_1"] == 1) + 
                          np.sum(lofar["lr_type_1"] == 2))
print("         Total: ", len(lofar))


match    sel-i:  1330
match   sel-W1:  42095
match     both:  43425
match   i-only:  3337
match  W1-only:  20507
match      all:  67269
         Total:  91332

The number of sources for which the matches in the i-band and the W1-band are both above the threshold but point to a different counterpart in the combined catalogue.


In [111]:
print(np.sum(lofar["lr_index_i"][lr_i_and_w1] != lofar["lr_index_w1"][lr_i_and_w1]))


1404

Duplicated sources

This is the number of sources of the combined catalogue that are matched to multiple LOFAR sources. In the case of the catalogue of Gaussians this number can be very high.


In [112]:
values, counts = np.unique(lofar[lofar["lr_type_1"] != 0]["lr_index_1"], return_counts=True)

In [113]:
len(values[counts > 1])


Out[113]:
6

In [114]:
n_dup, n_sour = np.unique(counts[counts > 1], return_counts=True)

In [115]:
plt.rcParams["figure.figsize"] = (6,6)
semilogy(n_dup, n_sour, marker="x")
xlabel("Number of multiple matches")
ylabel("Number of sources in the category")


Out[115]:
Text(0,0.5,'Number of sources in the category')

Save intermediate data


In [116]:
if save_intermediate:
    pickle.dump([bin_list_i, center_i, Q0_i, n_m_i1, q_m_i1], 
                open("{}/lofar_params_cumsum_1i.pckl".format(idp), 'wb'))
    pickle.dump([bin_list_w1, center_w1, Q0_w1, n_m_w11, q_m_w11], 
                open("{}/lofar_params_cumsum_1w1.pckl".format(idp), 'wb'))
    pickle.dump([bin_list_i, center_i, Q0_i, n_m_i, q_m_i], 
                open("{}/lofar_params_1i.pckl".format(idp), 'wb'))
    pickle.dump([bin_list_w1, center_w1, Q0_w1, n_m_w1, q_m_w1], 
                open("{}/lofar_params_1w1.pckl".format(idp), 'wb'))
    lofar.write("{}/lofar_m1.fits".format(idp), format="fits")

Second iteration using colour

From now on we will take into account the effect of the colour. The sample was divided into several categories according to the colour of the source, and this information is used here.
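
Assuming the multi-category estimator behaves like the single-band one but uses the distributions of the candidate's own colour category $c$ (a sketch; the exact behaviour is defined by MultiMLEstimatorU in mltier1), the likelihood ratio becomes

$$\mathrm{LR} = \frac{q_c(m)\,f(r)}{n_c(m)},$$

where $c$ is one of the 16 categories (W1 only, i only, or one of the $(i - W1)$ colour bins) and $m$ is the W1 magnitude for the W1-only category and the i magnitude otherwise.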

Reusable parameters for all the iterations

These parameters are derived from the underlying population and will not change.

First we compute the number of galaxies in each bin for the combined catalogue


In [117]:
bin_list = [bin_list_w1 if i == 0 else bin_list_i for i in range(len(colour_bin_def))]
centers = [center_w1 if i == 0 else center_i for i in range(len(colour_bin_def))]

In [118]:
numbers_combined_bins = np.array([np.sum(a["condition"]) for a in colour_bin_def])

In [119]:
numbers_combined_bins


Out[119]:
array([ 669839, 1778426,  123221,   86953,  127155,   91489,  106076,
        112638,  109949,   98183,   81055,   61913,   44390,   46507,
         14347,    5536])

Get the colour category and magnitudes for the matched LOFAR sources


In [120]:
bandwidth_colour = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
        0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]

In [121]:
n_m = []

# W1 only sources
n_m.append(get_n_m_kde(combined["W1mag"][combined["category"] == 0], 
                       centers[0], field.area, bandwidth=bandwidth_colour[0]))

# Rest of the sources
for i in range(1, len(colour_bin_def)):
    n_m.append(get_n_m_kde(combined["i"][combined["category"] == i], 
                           centers[i], field.area, bandwidth=bandwidth_colour[i]))

In [122]:
n_m_old = []

# W1 only sources
n_m_old.append(get_n_m(combined["W1mag"][combined["category"] == 0], bin_list_w1, field.area))

# Rest of the sources
for i in range(1, len(colour_bin_def)):
    n_m_old.append(get_n_m(combined["i"][combined["category"] == i], bin_list_i, field.area))

In [123]:
plt.rcParams["figure.figsize"] = (15,15)
for i, n_m_k in enumerate(n_m):
    subplot(5,5,i+1)
    plot(centers[i], n_m_old[i])
    plot(centers[i], np.cumsum(n_m_k))


Parameters of the matched sample

The parameters derived from the matched LOFAR galaxies: $q_0$, q(m) and the number of sources per category.

The columns "category", "W1mag" and "i" will contain the properties of the matched galaxies and will be updated in each iteration to save space.


In [124]:
lofar["category"] = np.nan
lofar["W1mag"] = np.nan
lofar["i"] = np.nan

In [125]:
c = ~np.isnan(lofar["lr_index_1"])
indices = lofar["lr_index_1"][c].astype(int)
lofar["category"][c] = combined[indices]["category"]
lofar["W1mag"][c] = combined[indices]["W1mag"]
lofar["i"][c] = combined[indices]["i"]

The next parameter represents the number of matched LOFAR sources in each colour category.


In [126]:
numbers_lofar_combined_bins = np.array([np.sum(lofar["category"] == c) 
                                        for c in range(len(numbers_combined_bins))])

In [127]:
numbers_lofar_combined_bins


Out[127]:
array([19330,  3993,   234,   569,  2325,  3281,  4703,  5232,  5029,
        4553,  4342,  3819,  3287,  4336,  1662,   529])

The $Q_0$ for each category is obtained by dividing the number of matched sources in the category by the total number of LOFAR sources in the sample.
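
That is, for each colour category $c$:

$$Q_{0,c} = \frac{N_{\mathrm{matched},c}}{N_{\mathrm{LOFAR}}}, \qquad \sum_c Q_{0,c} = Q_0,$$

with $N_{\mathrm{matched},c}$ taken from numbers_lofar_combined_bins and $N_{\mathrm{LOFAR}}$ = len(lofar).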


In [128]:
Q_0_colour = numbers_lofar_combined_bins/len(lofar) ### Q_0

In [129]:
q0_total = np.sum(Q_0_colour)

In [130]:
q0_total


Out[130]:
0.7360399421889371

The $q(m)$ is not estimated with the method of Fleuren et al. but from the most up-to-date magnitude distributions and numbers of the matched sources.
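
As a rough sketch of the construction (the exact normalisation and any correction for chance alignments are handled inside get_q_m and get_q_m_kde in mltier1): the magnitude distribution of the currently accepted counterparts in each category, smoothed by the KDE in the updated version, plays the role of $q_c(m)$,

$$q_c(m) \propto \mathrm{KDE}\big[\,m \text{ of the matched counterparts in category } c\,\big],$$

and it is re-estimated at every iteration as the set of accepted matches changes.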


In [131]:
q_m_old = []
radius = 15. 

# W1 only sources
q_m_old.append(get_q_m(lofar["W1mag"][lofar["category"] == 0], 
                   bin_list_w1, 
                   numbers_lofar_combined_bins[0], 
                   n_m_old[0], 
                   field.area, 
                   radius=radius))

# Rest of the sources
for i in range(1, len(numbers_lofar_combined_bins)):
    q_m_old.append(get_q_m(lofar["i"][lofar["category"] == i], 
                   bin_list_i, 
                   numbers_lofar_combined_bins[i], 
                   n_m_old[i], 
                   field.area, 
                   radius=radius))

In [132]:
q_m = []
radius = 15. 

# W1 only sources
q_m.append(get_q_m_kde(lofar["W1mag"][lofar["category"] == 0], 
                   centers[0], 
                   radius=radius,
                   bandwidth=bandwidth_colour[0]))

# Rest of the sources
for i in range(1, len(numbers_lofar_combined_bins)):
    q_m.append(get_q_m_kde(lofar["i"][lofar["category"] == i], 
                   centers[i], 
                   radius=radius,
                   bandwidth=bandwidth_colour[i]))

In [133]:
plt.rcParams["figure.figsize"] = (15,15)
for i, q_m_k in enumerate(q_m):
    subplot(5,5,i+1)
    plot(centers[i], q_m_old[i])
    plot(centers[i], np.cumsum(q_m_k))



In [134]:
plt.rcParams["figure.figsize"] = (12,10)

from matplotlib import cm
from matplotlib.collections import LineCollection

cm_subsection = linspace(0., 1., 16) 
colors = [ cm.viridis(x) for x in cm_subsection ]

low = np.nonzero(centers[1] >= 15)[0][0]
high = np.nonzero(centers[1] >= 22.2)[0][0]

fig, a = plt.subplots()

for i, q_m_k in enumerate(q_m):
    #plot(centers[i], q_m_old[i]/n_m_old[i])
    a = subplot(4,4,i+1)
    if i not in [-1]:
        n_m_aux = n_m[i]/np.sum(n_m[i])
        lwidths = (n_m_aux/np.max(n_m_aux)*10).astype(float) + 1
        #print(lwidths)
        
        y_aux = q_m_k/n_m[i]
        factor = np.max(y_aux[low:high])
        y = y_aux
        #print(y)
        x = centers[i]
        
        points = np.array([x, y]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        
        lc = LineCollection(segments, linewidths=lwidths, color=colors[i])
        
        a.add_collection(lc)
        
        #plot(centers[i], x/factor, color=colors[i-1])
        xlim([12, 30])
        if i == 0:
            xlim([10, 23])
        ylim([0, 1.2*factor])

subplots_adjust(left=0.125, 
                bottom=0.1, 
                right=0.9, 
                top=0.9,
                wspace=0.4, 
                hspace=0.2)




Save intermediate parameters


In [135]:
if save_intermediate:
    pickle.dump([bin_list, centers, Q_0_colour, n_m_old, q_m_old], 
                open("{}/lofar_params_cumsum_2.pckl".format(idp), 'wb'))
    pickle.dump([bin_list, centers, Q_0_colour, n_m, q_m], 
                open("{}/lofar_params_2.pckl".format(idp), 'wb'))

Prepare for ML


In [136]:
selection = ~np.isnan(combined["category"]) # Avoid the two dreaded sources with no actual data
catalogue = combined[selection]

In [137]:
radius = 15

In [138]:
def apply_ml(i, likelihood_ratio_function):
    idx_0 = idx_i[idx_lofar == i]
    d2d_0 = d2d[idx_lofar == i]
    
    category = catalogue["category"][idx_0].astype(int)
    mag = catalogue["i"][idx_0]
    mag[category == 0] = catalogue["W1mag"][idx_0][category == 0]
    
    lofar_ra = lofar[i]["RA"]
    lofar_dec = lofar[i]["DEC"]
    lofar_pa = lofar[i]["PA"]
    lofar_maj_err = lofar[i]["E_Maj"]
    lofar_min_err = lofar[i]["E_Min"]
    c_ra = catalogue["ra"][idx_0]
    c_dec = catalogue["dec"][idx_0]
    c_ra_err = catalogue["raErr"][idx_0]
    c_dec_err = catalogue["decErr"][idx_0]
    
    sigma, sigma_maj, sigma_min = get_sigma_all(lofar_maj_err, lofar_min_err, lofar_pa, 
                      lofar_ra, lofar_dec, 
                      c_ra, c_dec, c_ra_err, c_dec_err)

    lr_0 = likelihood_ratio_function(mag, d2d_0.arcsec, sigma, sigma_maj, sigma_min, category)
    
    chosen_index = np.argmax(lr_0)
    result = [combined_aux_index[selection][idx_0[chosen_index]], # Index
              (d2d_0.arcsec)[chosen_index],                        # distance
              lr_0[chosen_index]]                                  # LR
    return result

Run the cross-match

This step will not need to be repeated in the following iterations.


In [139]:
idx_lofar, idx_i, d2d, d3d = search_around_sky(
    coords_lofar, coords_combined[selection], radius*u.arcsec)

In [140]:
idx_lofar_unique = np.unique(idx_lofar)

Run the ML matching


In [141]:
likelihood_ratio = MultiMLEstimatorU(Q_0_colour, n_m, q_m, centers)

In [142]:
def ml(i):
    return apply_ml(i, likelihood_ratio)

In [143]:
res = parallel_process(idx_lofar_unique, ml, n_jobs=n_cpus)


100%|██████████| 87.9k/87.9k [05:54<00:00, 248it/s]
100%|██████████| 87863/87863 [00:00<00:00, 386077.13it/s]

In [144]:
lofar["lr_index_2"] = np.nan
lofar["lr_dist_2"] = np.nan
lofar["lr_2"] = np.nan

In [145]:
(lofar["lr_index_2"][idx_lofar_unique], 
 lofar["lr_dist_2"][idx_lofar_unique], 
 lofar["lr_2"][idx_lofar_unique]) = list(map(list, zip(*res)))

Get the new threshold for the ML matching: the threshold is taken as the $(1 - Q_0)$ percentile of the LR distribution, so that a fraction $Q_0$ (the total over all categories) of the LOFAR sources is retained. FIX THIS


In [146]:
lofar["lr_2"][np.isnan(lofar["lr_2"])] = 0

In [147]:
threshold = np.percentile(lofar["lr_2"], 100*(1 - q0_total))
#manual_q0 = 0.65
#threshold = np.percentile(lofar["lr_2"], 100*(1 - manual_q0))

In [148]:
threshold # Old: 0.69787


Out[148]:
0.7168394527247878

In [171]:
plt.rcParams["figure.figsize"] = (15,6)
subplot(1,2,1)
hist(lofar[lofar["lr_2"] != 0]["lr_2"], bins=200)
vlines([threshold], 0, 1000)
ylim([0,1000])
subplot(1,2,2)
hist(np.log10(lofar[lofar["lr_2"] != 0]["lr_2"]+1), bins=200)
vlines(np.log10(threshold+1), 0, 1000)
ticks, _ = xticks()
xticks(ticks, ["{:.1f}".format(10**t-1) for t in ticks])
ylim([0,1000]);



In [154]:
lofar["lr_index_sel_2"] = lofar["lr_index_2"]
lofar["lr_index_sel_2"][lofar["lr_2"] < threshold] = np.nan

In [155]:
n_changes = np.sum((lofar["lr_index_sel_2"] != lofar["lr_index_1"]) & 
                   ~np.isnan(lofar["lr_index_sel_2"]) &
                   ~np.isnan(lofar["lr_index_1"]))

In [174]:
n_changes # Old: 382


Out[174]:
382

Enter the results


In [157]:
# Clear aux columns
lofar["category"] = np.nan
lofar["W1mag"] = np.nan
lofar["i"] = np.nan

c = ~np.isnan(lofar["lr_index_sel_2"])
indices = lofar["lr_index_sel_2"][c].astype(int)
lofar["category"][c] = combined[indices]["category"]
lofar["W1mag"][c] = combined[indices]["W1mag"]
lofar["i"][c] = combined[indices]["i"]

In [158]:
numbers_lofar_combined_bins = np.array([np.sum(lofar["category"] == c) 
                                        for c in range(len(numbers_combined_bins))])

In [177]:
numbers_lofar_combined_bins


Out[177]:
array([19576,  3768,   111,   520,  2295,  3281,  4699,  5246,  5053,
        4567,  4369,  3851,  3312,  4403,  1679,   539])

Save intermediate data


In [160]:
if save_intermediate:
    lofar.write("{}/lofar_m2.fits".format(idp), format="fits")

Iterate until convergence


In [161]:
rerun_iter = False

In [185]:
if rerun_iter:
    lofar = Table.read("{}/lofar_m2.fits".format(idp))
    bin_list, centers, Q_0_colour, n_m, q_m = pickle.load(open("{}/lofar_params_2.pckl".format(idp), 'rb'))
    inter_data_list = glob("{}/lofar_m*.fits".format(idp))
    # Remove data
    for inter_data_file in inter_data_list:
        if inter_data_file[-7:-5] not in ["m1", "m2"]:
            #print(inter_data_file)
            os.remove(inter_data_file)
    # Remove images
    images_list = glob("{}/*.png".format(idp))
    for images in images_list:
        #print(images)
        os.remove(images)
    # Remove parameters
    inter_param_list = glob("{}/lofar_params_*.pckl".format(idp))
    for inter_param_file in inter_param_list:
        if inter_param_file[-7:-5] not in ["1i", "w1", "_2"]:
            #print(inter_param_file)
            os.remove(inter_param_file)

In [163]:
radius = 15.

In [164]:
from matplotlib import pyplot as plt

In [165]:
def plot_q_n_m(q_m, n_m):
    fig, a = plt.subplots()

    for i, q_m_k in enumerate(q_m):
        #plot(centers[i], q_m_old[i]/n_m_old[i])
        a = subplot(4,4,i+1)
        if i not in [-1]:
            n_m_aux = n_m[i]/np.sum(n_m[i])
            lwidths = (n_m_aux/np.max(n_m_aux)*10).astype(float) + 1
            #print(lwidths)

            y_aux = q_m_k/n_m[i]
            factor = np.max(y_aux[low:high])
            y = y_aux
            #print(y)
            x = centers[i]

            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)

            lc = LineCollection(segments, linewidths=lwidths, color=colors[i])

            a.add_collection(lc)

            #plot(centers[i], x/factor, color=colors[i-1])
            xlim([12, 30])
            if i == 0:
                xlim([10, 23])
            ylim([0, 1.2*factor])

    subplots_adjust(left=0.125, 
                    bottom=0.1, 
                    right=0.9, 
                    top=0.9,
                    wspace=0.4, 
                    hspace=0.2)
    return fig

In [189]:
for j in range(10):
    iteration = j+3 
    print("Iteration {}".format(iteration))
    print("=============")
    ## Get new parameters
    # Number of matched sources per bin
    numbers_lofar_combined_bins = np.array([np.sum(lofar["category"] == c) 
                                            for c in range(len(numbers_combined_bins))])
    print("numbers_lofar_combined_bins")
    print(numbers_lofar_combined_bins)
    # q_0
    Q_0_colour_est = numbers_lofar_combined_bins/len(lofar) ### Q_0
    Q_0_colour = q0_min_level(Q_0_colour_est, min_level=0.001)
    print("Q_0_colour")
    print(Q_0_colour)
    q0_total = np.sum(Q_0_colour)
    print("Q_0_total: ", q0_total)
    # q_m_old
    q_m_old = []
    # W1 only sources
    q_m_old.append(get_q_m(lofar["W1mag"][lofar["category"] == 0], 
                   bin_list_w1, numbers_lofar_combined_bins[0], 
                   n_m_old[0], field.area, radius=radius))
    # Rest of the sources
    for i in range(1, len(numbers_lofar_combined_bins)):
        q_m_old.append(get_q_m(lofar["i"][lofar["category"] == i], 
                       bin_list_i, numbers_lofar_combined_bins[i],
                       n_m_old[i], field.area, radius=radius))
    # q_m
    q_m = []
    # W1 only sources
    q_m.append(get_q_m_kde(lofar["W1mag"][lofar["category"] == 0], 
                   centers[0], radius=radius, bandwidth=bandwidth_colour[0]))
    # Rest of the sources
    for i in range(1, len(numbers_lofar_combined_bins)):
        q_m.append(get_q_m_kde(lofar["i"][lofar["category"] == i], 
                       centers[i], radius=radius, bandwidth=bandwidth_colour[i]))
    # Save new parameters
    if save_intermediate:
        pickle.dump([bin_list, centers, Q_0_colour, n_m_old, q_m_old], 
                    open("{}/lofar_params_cumsum_{}.pckl".format(idp, iteration), 'wb'))
        pickle.dump([bin_list, centers, Q_0_colour, n_m, q_m], 
                    open("{}/lofar_params_{}.pckl".format(idp, iteration), 'wb'))
    if plot_intermediate:
        fig = plt.figure(figsize=(15,15))
        for i, q_m_k in enumerate(q_m):
            plt.subplot(5,5,i+1)
            plt.plot(centers[i], q_m_k)
        plt.savefig('{}/q0_{}.png'.format(idp, iteration))
        del fig
        fig = plt.figure(figsize=(15,15))
        for i, q_m_k in enumerate(q_m):
            plt.subplot(5,5,i+1)
            plt.plot(centers[i], q_m_k/n_m[i])
        plt.savefig('{}/q_over_n_{}.png'.format(idp, iteration))
        del fig
        fig = plot_q_n_m(q_m, n_m)
        plt.savefig('{}/q_over_n_nice_{}.png'.format(idp, iteration))
        del fig
    ## Define new likelihood_ratio
    likelihood_ratio = MultiMLEstimatorU(Q_0_colour, n_m, q_m, centers)
    def ml(i):
        return apply_ml(i, likelihood_ratio)
    ## Run the ML
    res = parallel_process(idx_lofar_unique, ml, n_jobs=n_cpus)
    lofar["lr_index_{}".format(iteration)] = np.nan
    lofar["lr_dist_{}".format(iteration)] = np.nan
    lofar["lr_{}".format(iteration)] = np.nan
    (lofar["lr_index_{}".format(iteration)][idx_lofar_unique], 
     lofar["lr_dist_{}".format(iteration)][idx_lofar_unique], 
     lofar["lr_{}".format(iteration)][idx_lofar_unique]) = list(map(list, zip(*res)))
    lofar["lr_{}".format(iteration)][np.isnan(lofar["lr_{}".format(iteration)])] = 0
    ## Get and apply the threshold
    threshold = np.percentile(lofar["lr_{}".format(iteration)], 100*(1 - q0_total))
    #threshold = get_threshold(lofar[lofar["lr_{}".format(iteration)] != 0]["lr_{}".format(iteration)])
    print("Threshold: ", threshold)
    if plot_intermediate:
        fig = plt.figure(figsize=(15,6))
        plt.subplot(1,2,1)
        plt.hist(lofar[lofar["lr_{}".format(iteration)] != 0]["lr_{}".format(iteration)], bins=200)
        plt.vlines([threshold], 0, 1000)
        plt.ylim([0,1000])
        plt.subplot(1,2,2)
        plt.hist(np.log10(lofar[lofar["lr_{}".format(iteration)] != 0]["lr_{}".format(iteration)]+1), bins=200)
        plt.vlines(np.log10(threshold+1), 0, 1000)
        ticks, _ = plt.xticks()
        plt.xticks(ticks, ["{:.1f}".format(10**t-1) for t in ticks])
        plt.ylim([0,1000])
        plt.savefig('{}/lr_distribution_{}.png'.format(idp, iteration))
        del fig
    ## Apply the threshold
    lofar["lr_index_sel_{}".format(iteration)] = lofar["lr_index_{}".format(iteration)]
    lofar["lr_index_sel_{}".format(iteration)][lofar["lr_{}".format(iteration)] < threshold] = np.nan
    ## Enter changes into the catalogue
    # Clear aux columns
    lofar["category"] = np.nan
    lofar["W1mag"] = np.nan
    lofar["i"] = np.nan
    # Update data
    c = ~np.isnan(lofar["lr_index_sel_{}".format(iteration)])
    indices = lofar["lr_index_sel_{}".format(iteration)][c].astype(int)
    lofar["category"][c] = combined[indices]["category"]
    lofar["W1mag"][c] = combined[indices]["W1mag"]
    lofar["i"][c] = combined[indices]["i"]
    # Save the data
    if save_intermediate:
        lofar.write("{}/lofar_m{}.fits".format(idp, iteration), format="fits")
    ## Compute number of changes
    n_changes = np.sum((
            lofar["lr_index_sel_{}".format(iteration)] != lofar["lr_index_sel_{}".format(iteration-1)]) & 
            ~np.isnan(lofar["lr_index_sel_{}".format(iteration)]) &
            ~np.isnan(lofar["lr_index_sel_{}".format(iteration-1)]))
    print("N changes: ", n_changes)
    t_changes = np.sum((
            lofar["lr_index_sel_{}".format(iteration)] != lofar["lr_index_sel_{}".format(iteration-1)]))
    print("T changes: ", t_changes)
    ## Check changes
    if n_changes == 0:
        break
    else:
        print("******** continue **********")


Iteration 3
=============
numbers_lofar_combined_bins
[19576  3768   111   520  2295  3281  4699  5246  5053  4567  4369  3851
  3312  4403  1679   539]
Q_0_colour
[0.2143389  0.04125608 0.00121535 0.00569351 0.0251281  0.03592388
 0.05144966 0.05743879 0.05532563 0.05000438 0.04783646 0.04216485
 0.0362633  0.04820873 0.01838348 0.00590155]
Q_0_total:  0.7365326501116805
100%|██████████| 87.9k/87.9k [05:54<00:00, 248it/s]
100%|██████████| 87863/87863 [00:00<00:00, 385998.68it/s]
Threshold:  0.671539055471364
N changes:  13
T changes:  24104
******** continue **********
Iteration 4
=============
numbers_lofar_combined_bins
[19598  3745    95   519  2295  3282  4699  5248  5054  4570  4371  3851
  3313  4409  1680   540]
Q_0_colour
[0.21457977 0.04100425 0.00104016 0.00568256 0.0251281  0.03593483
 0.05144966 0.05746069 0.05533657 0.05003723 0.04785836 0.04216485
 0.03627425 0.04827443 0.01839443 0.0059125 ]
Q_0_total:  0.7365326501116805
100%|██████████| 87.9k/87.9k [05:51<00:00, 250it/s]
100%|██████████| 87863/87863 [00:00<00:00, 396115.99it/s]
Threshold:  0.6669766687372396
N changes:  3
T changes:  24074
******** continue **********
Iteration 5
=============
numbers_lofar_combined_bins
[19604  3743    88   519  2295  3282  4699  5248  5055  4570  4371  3851
  3314  4410  1680   540]
Q_0_colour
[0.21464547 0.04098235 0.001      0.00568256 0.0251281  0.03593483
 0.05144966 0.05746069 0.05534752 0.05003723 0.04785836 0.04216485
 0.0362852  0.04828538 0.01839443 0.0059125 ]
Q_0_total:  0.7365691323960933
100%|██████████| 87.9k/87.9k [05:55<00:00, 247it/s]
100%|██████████| 87863/87863 [00:00<00:00, 397191.00it/s]
Threshold:  0.6636222443922981
N changes:  0
T changes:  24067

In [164]:
numbers_lofar_combined_bins = np.array([np.sum(lofar["category"] == c) 
                                        for c in range(len(numbers_combined_bins))])
numbers_lofar_combined_bins


Out[164]:
array([19588,  3729,    83,   518,  2295,  3282,  4700,  5248,  5054,
        4569,  4370,  3851,  3313,  4410,  1679,   540])

In [165]:
if save_intermediate:
    pickle.dump([numbers_lofar_combined_bins, numbers_combined_bins], 
                open("{}/numbers_{}.pckl".format(idp, iteration), 'wb'))

In [167]:
good = False

In [193]:
if good:
    if os.path.exists("lofar_params.pckl"):
        os.remove("lofar_params.pckl")
    copyfile("{}/lofar_params_{}.pckl".format(idp, iteration), "lofar_params.pckl")

In [ ]: