This "Oxidation Pool" notebook parallelizes the oxidation-state-finding routine across the dataset: each copy of the notebook processes a different slice of the structures (this one handles indices 10000:15000).


In [1]:
import pymatgen_pars as pymt
import numpy as np
import pymatgen.analysis.bond_valence as bv
from collections import Counter

In [2]:
# Load the deduplicated structure dataset prepared elsewhere in the project.
# NOTE(review): pymt is the project-local `pymatgen_pars` module; the exact
# return type of read_unique_data is not visible here — presumably a list of
# pymatgen Structure objects (L15 accesses .composition.elements). Confirm.
st=pymt.read_unique_data("unique_data.json")

In [3]:
import tqdm
# Keep only structures whose every element has bond-valence parameters:
# BVAnalyzer can only assign oxidation states when parameters exist for
# all constituent elements. Builtin all() with a generator short-circuits
# on the first missing element, unlike np.all over a materialized list.
st_masked = [i for i in st if all(a in bv.BV_PARAMS for a in i.composition.elements)]
BV = bv.BVAnalyzer()

In [4]:
# Best-effort valence assignment over this notebook's shard of the dataset
# (indices 10000:15000 of the masked structures).
# valency3[k] holds the per-site valences for True_vals[k], so the two
# lists stay aligned index-for-index.
valency3 = []
True_vals = []
for i in tqdm.tqdm_notebook(st_masked[10000:15000]):
    try:
        valency3.append(BV.get_valences(i))
        True_vals.append(i)
    except Exception:
        # BVAnalyzer raises (e.g. ValueError) when it cannot find a
        # charge-neutral valence assignment; skip such structures.
        # `except Exception` (not bare `except:`) so KeyboardInterrupt /
        # SystemExit still stop this long-running loop.
        pass




In [6]:
import pickle

# Persist this shard's results. pickle writes bytes, so the files must be
# opened in binary mode ("wb") — text mode ("w") raises
# "TypeError: write() argument must be str, not bytes" on Python 3.
with open("val3.pickle", "wb") as f:
    pickle.dump(valency3, f)

with open("True3.pickle", "wb") as f:
    pickle.dump(True_vals, f)

In [ ]: