These Oxidation Pool notebooks parallelize the oxidation-state assignment routine across the dataset: each notebook processes its own slice of the structure list (this one covers indices 15000–20000) and pickles its results separately.
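The splitting itself is not shown in this notebook; below is a minimal sketch of the layout, where the chunk size of 5000 and the one-notebook-per-chunk arrangement are assumptions inferred from the 15000:20000 slice and the val4/True4 file names used later.

def make_slices(n_structures, chunk_size=5000):
    """Return (start, stop) index pairs, one per Oxidation Pool notebook (illustrative)."""
    return [(start, min(start + chunk_size, n_structures))
            for start in range(0, n_structures, chunk_size)]

# e.g. make_slices(20000) -> [(0, 5000), (5000, 10000), (10000, 15000), (15000, 20000)]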


In [4]:
import pymatgen_pars as pymt
import numpy as np
import pymatgen.analysis.bond_valence as bv
from collections import Counter

In [5]:
st = pymt.read_unique_data("unique_data.json")  # load the list of unique structures

In [6]:
import tqdm
# Keep only structures whose elements all have bond-valence parameters;
# BVAnalyzer cannot assign valences to the rest.
st_masked = [s for s in st
             if all(el in bv.BV_PARAMS for el in s.composition.elements)]
BV = bv.BVAnalyzer()

In [7]:
valency4 = []
True_vals = []
# This notebook handles the 15000:20000 slice; the other Oxidation Pool
# notebooks cover the remaining index ranges.
for s in tqdm.tqdm_notebook(st_masked[15000:20000]):
    try:
        valency4.append(BV.get_valences(s))
        True_vals.append(s)
    except Exception:
        # Skip structures for which valences cannot be assigned
        # (BVAnalyzer raises ValueError in that case).
        pass




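If the assigned valences need to live on the structures themselves, pymatgen can attach them per site. This is a minimal illustrative sketch, not part of the original pipeline:

# Illustrative only: decorate each retained structure with its assigned
# per-site oxidation states for downstream analysis.
for s, vals in zip(True_vals, valency4):
    s.add_oxidation_state_by_site(vals)
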
In [8]:
import pickle

# Pickles must be written in binary mode ("wb"); text mode fails under Python 3.
with open("val4.pickle", "wb") as f:
    pickle.dump(valency4, f)

with open("True4.pickle", "wb") as f:
    pickle.dump(True_vals, f)
