In [1]:
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import numpy as np
import fileinput
from itertools import product
import pandas as pd
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
import seaborn as sns
from os import listdir
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
# %matplotlib notebook
# Global plot styling: larger axis labels / tick labels and a golden-ratio
# default figure size (alternatives kept below for reference).
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# plt.rcParams['figure.figsize'] = (10,5)
# plt.rcParams['figure.figsize'] = (10,5.625) # 16:9
plt.rcParams['figure.figsize'] = (10,6.180) #golden ratio
# plt.rcParams['figure.figsize'] = (10*2,6.180*2) #golden ratio
In [2]:
# Umbrella-sampling rerun (rg 0.15, lipid 1.0, mem 1, go 0.8).
# NOTE(review): hardcoded absolute local path — parameterize via a DATA_DIR
# constant if this notebook is to be rerun elsewhere.
data = pd.read_feather("/Users/weilu/Research/server/mar_2018/sixth/rg_0.15_lipid_1.0_mem_1_go_0.8/rerun_7_24_Mar_173616.feather")
In [3]:
# Replica label -> temperature (K) for this run (280-700 K ladder); the
# commented-out mapping below belongs to other runs and is kept for reference.
# dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
dic = {"T0":280, "T1":300, "T2":325, "T3":350, "T4":375, "T5":400, "T6":450, "T7":500, "T8":550, "T9":600, "T10":650, "T11":700}
# Work on a copy: the original aliased `data` and overwrote "Temp" in place,
# so re-running this cell raised KeyError on the already-mapped values.
a = data.copy()
# Series.map(dict) is the vectorized, idiomatic form of apply(lambda x: dic[x]).
a["Temp"] = a["Temp"].map(dic)
# Mean realized distance per (bias target, temperature), low temperatures only.
t = a.query("Temp < 400").groupby(["BiasTo","Temp"])[["DisReal","Run"]].mean().reset_index()
# Convert the bias label to numeric once (the original converted it twice,
# elementwise), then compute the deviation of realized distance from target.
t["BiasTo"] = pd.to_numeric(t["BiasTo"])
t["Diff"] = t["DisReal"] - t["BiasTo"]
# One scatter of Diff vs BiasTo per temperature, colored by temperature.
fg = sns.FacetGrid(data=t, hue='Temp', size=8, aspect=1.61)
fg.map(plt.scatter, 'BiasTo', 'Diff').add_legend()
Out[3]:
In [16]:
a.query("Temp == 300 and DisReal > 60 and DisReal < 90").plot.hexbin("z_h5", "z_h6", cmap="seismic", sharex=False)
Out[16]:
In [9]:
a.query("Temp == 280 and DisReal > 60 and DisReal < 90").plot.hexbin("z_h1", "z_h6", cmap="seismic", sharex=False)
Out[9]:
In [17]:
a.query("Temp == 300 and DisReal > 60 and DisReal < 90 and z_h3 > -10 and z_h6 > -10").groupby(["BiasTo", "Run"])["DisReal"].describe()
Out[17]:
In [7]:
a.query("Temp == 300 and DisReal > 60 and DisReal < 90").plot.hexbin("z_h3", "z_h6", cmap="seismic", sharex=False)
Out[7]:
In [143]:
# Second dataset: force 0.03 run from the "eighth" batch (hardcoded local path).
data = pd.read_feather("/Users/weilu/Research/server/mar_2018/eighth/force_0.03_rg_0.15_lipid_1.0_mem_1_go_0.8/rerun_3_30_Mar_135549.feather")
In [144]:
# Replica label -> temperature (K) for this run (280-470 K ladder).
dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
# Copy so re-running the cell doesn't KeyError on the already-mapped column
# (the original mutated the frame aliased by `data` in place).
a = data.copy()
a["Temp"] = a["Temp"].map(dic)
# Mean realized distance per (bias target, temperature), below 400 K.
t = a.query("Temp < 400").groupby(["BiasTo","Temp"])[["DisReal","Run"]].mean().reset_index()
# Convert the bias label once, then deviation of realized distance from target.
t["BiasTo"] = pd.to_numeric(t["BiasTo"])
t["Diff"] = t["DisReal"] - t["BiasTo"]
fg = sns.FacetGrid(data=t, hue='Temp', size=8, aspect=1.61)
fg.map(plt.scatter, 'BiasTo', 'Diff').add_legend()
Out[144]:
In [145]:
a.query("Temp == 300").plot.hexbin("DisReal", "Qw", cmap="seismic", sharex=False)
Out[145]:
In [151]:
a.query("Temp == 300 and DisReal > 60").plot.hexbin("Qw", "z_h1", cmap="seismic", sharex=False)
Out[151]:
In [159]:
a.query("Temp == 300 and DisReal > 60 and DisReal < 90").plot.hexbin("Qw", "z_h3", cmap="seismic", sharex=False)
Out[159]:
In [161]:
a.query("Temp == 300 and DisReal > 60 and DisReal < 90").plot.hexbin("z_h3", "z_h6", cmap="seismic", sharex=False)
Out[161]:
In [153]:
a.query("Temp == 300 and DisReal > 60 and DisReal < 90").plot.hexbin("DisReal", "TotalE", cmap="seismic", sharex=False)
Out[153]:
In [139]:
b = a.query("BiasTo == '92.0'").groupby(["Run", "Temp"])["Step"].count().reset_index()
c = b.pivot(index="Run", columns="Temp", values="Step").reset_index()
c
Out[139]:
In [50]:
# Third dataset: earlier rerun (29 Mar) of the same force 0.03 setup.
data = pd.read_feather("/Users/weilu/Research/server/mar_2018/eighth/force_0.03_rg_0.15_lipid_1.0_mem_1_go_0.8/rerun_1_29_Mar_152326.feather")
In [51]:
# Replica label -> temperature (K); same 280-470 K ladder as the cell above.
dic = {"T0":280, "T1":290, "T2":300, "T3":310, "T4":320, "T5":335, "T6":350, "T7":365, "T8":380, "T9":410, "T10":440, "T11":470}
# Copy so the in-place "Temp" remap is safe to re-run (original raised
# KeyError on a second execution because it mutated `data` through the alias).
a = data.copy()
a["Temp"] = a["Temp"].map(dic)
# Mean realized distance per (bias target, temperature), below 400 K.
t = a.query("Temp < 400").groupby(["BiasTo","Temp"])[["DisReal","Run"]].mean().reset_index()
# Numeric bias label first, then deviation from the bias target.
t["BiasTo"] = pd.to_numeric(t["BiasTo"])
t["Diff"] = t["DisReal"] - t["BiasTo"]
fg = sns.FacetGrid(data=t, hue='Temp', size=8, aspect=1.61)
fg.map(plt.scatter, 'BiasTo', 'Diff').add_legend()
Out[51]:
In [65]:
# Distribution of mean total energy per (run, bias window) at 300 K.
a.query("Temp == 300").groupby(["Run", "BiasTo"])["TotalE"].mean().hist()
Out[65]:
In [133]:
# Native-like frames (Qw > 0.6) at low temperature with very low energy —
# candidate well-folded structures.
a.query("Qw > 0.6 and Temp < 290 and TotalE < -990")
Out[133]:
In [131]:
# Complementary high-energy native-like frames.
# NOTE(review): `t` here shadows the bias-diff summary table built earlier.
# The commented-out to_csv was a one-off structure export, kept for provenance.
t = a.query("Qw > 0.6 and Temp < 290 and TotalE > -900")
t.query("BiasTo == '80.0' and Run == 8")
# t.query("BiasTo == '80.0' and Run == 8").to_csv("/Users/weilu/Research/server/mar_2018/05_week/pick_structure/highE.csv")
Out[131]:
In [132]:
# How many low-energy native frames each (bias, run) contributed.
a.query("Qw > 0.6 and Temp < 290 and TotalE < -990").groupby(["BiasTo", "Run"]).count()
Out[132]:
In [124]:
# Energy statistics per temperature for folded frames (Qw > 0.6)...
a.query("Qw > 0.6").groupby("Temp")["TotalE"].describe().reset_index()
Out[124]:
In [123]:
# ...versus non-native frames (Qw < 0.6), for comparison.
a.query("Qw < 0.6").groupby("Temp")["TotalE"].describe().reset_index()
Out[123]:
In [97]:
# Per-temperature energy statistics of native-like frames, used below for a
# linear fit of mean energy vs temperature.
tt = a.query("Qw > 0.6").groupby("Temp")["TotalE"].describe().reset_index()
In [98]:
# Degree-1 fit: mean TotalE as a function of temperature -> (slope, intercept).
lm_native = np.polyfit(tt.Temp, tt["mean"], 1)
In [120]:
# Temperature grid for plotting the fitted lines.
x = np.linspace(280, 350, 100)
In [121]:
# NOTE(review): `lm_original` is only defined further down (cell In [96]);
# this notebook was executed out of order, so Restart & Run All fails here.
plt.plot(x, x*lm_native[0] + lm_native[1])
plt.plot(x, x*lm_original[0] + lm_original[1], c="red")
Out[121]:
In [100]:
# Fit coefficients (slope, intercept) for the native ensemble.
lm_native
Out[100]:
In [99]:
# Fit coefficients for the comparison ensemble (defined in cell In [96]).
lm_original
Out[99]:
In [104]:
# Overlay the native fit on the empirical mean-energy curve.
a.query("Qw > 0.6").groupby("Temp")["TotalE"].describe().reset_index().plot("Temp", "mean")
plt.plot(x, x*lm_native[0] + lm_native[1])
Out[104]:
In [70]:
# Energy summaries of folded frames at the two lowest temperatures.
a.query("Temp == 280 and Qw > 0.6")["TotalE"].describe()
Out[70]:
In [69]:
a.query("Temp == 290 and Qw > 0.6")["TotalE"].describe()
Out[69]:
In [44]:
# Separation coordinate vs folding progress at 300 K for this run.
a.query("Temp == 300").plot.hexbin("DisReal", "Qw", cmap="seismic", sharex=False)
Out[44]:
In [107]:
# DisReal vs Qw for frames with helix 6 below z = -10 but helix 4 above
# (presumably a partially inserted intermediate — confirm sign convention).
# The original bound the returned plot Axes to `tt`, the same name the next
# cells use for a DataFrame; the binding was dead (immediately overwritten)
# and misleading, so the assignment is dropped.
a.query("z_h6 < -10 and Qw > 0.18 and DisReal > 60 and z_h4 > -10").plot.hexbin("DisReal", "Qw", cmap="seismic", sharex=False)
In [108]:
# Frames with helix 6 below z = -10 and helix 4 above — one putative
# intermediate state (z-axis membrane convention assumed; confirm).
tt= a.query("z_h6 < -10 and Qw > 0.18 and DisReal > 60 and z_h4 > -10")
In [118]:
# The mirrored state (helix 6 above, helix 1 below); keep only well-sampled
# (bias, run) combinations with more than 500 frames.
tt= a.query("z_h6 > -10 and Qw > 0.18 and DisReal > 60 and z_h1 < -10")
tt.groupby(["BiasTo", "Run"])["TotalE"].describe().query("count > 500")
Out[118]:
In [119]:
# tt.query("BiasTo == '82.0'").query("Run == 2 or Run == 3").groupby(["Run","Temp"])["TotalE"].describe()
In [113]:
# Looser sampling threshold: energy statistics per (bias, run).
tt.groupby(["BiasTo", "Run"])["TotalE"].describe().query("count > 100")
Out[113]:
In [110]:
# Same threshold, but summarizing the realized distance instead.
tt.groupby(["BiasTo", "Run"])["DisReal"].describe().query("count > 100")
Out[110]:
In [96]:
# Energy-vs-temperature fit for this intermediate ensemble.
# NOTE(review): `lm_original` defined here is consumed by EARLIER cells
# (In [99], In [121]) — out-of-order execution; reorder before a full re-run.
ttt= tt.groupby("Temp")["TotalE"].describe().reset_index()
lm_original = np.polyfit(ttt.Temp, ttt["mean"], 1)
print(lm_original)
tt.groupby("Temp")["TotalE"].describe().reset_index().plot("Temp", "mean")
Out[96]:
In [134]:
# Unfolding trajectory (29 Mar run); hardcoded local path.
data = pd.read_feather("/Users/weilu/Research/server/mar_2018/05_week/unfold/29_Mar_230845.feather")
In [135]:
# Folding progress over the first 10M steps.
data.query("Steps < 1e7").plot.hexbin("Steps", "Qw", cmap="cool", sharex=False)
Out[135]:
In [2]:
# Switch to the 28 Mar unfolding run. NOTE(review): execution counts restart
# at In [2] here — this section was run in a separate kernel session.
data = pd.read_feather("/Users/weilu/Research/server/mar_2018/05_week/unfold/28_Mar_163824.feather")
In [27]:
# Scratch arithmetic; 69.7 is presumably a characteristic distance from the
# run — TODO confirm what these constants represent.
69.7*0.35
Out[27]:
In [29]:
1/69.7*2
Out[29]:
In [9]:
# Fix typo: `data.columnsumns` raised AttributeError — list the columns.
data.columns
Out[9]:
In [22]:
data.query("Steps < 0.8e7").plot.hexbin("Steps", "Qw", cmap="cool", sharex=False)
Out[22]:
In [17]:
data.query("Steps < 0.75e7").query("DisReal < 100").plot.hexbin("Steps", "DisReal", cmap="cool", sharex=False)
Out[17]:
In [25]:
data.query("Qw < 0.4 and Qw > 0.2").plot.hexbin("Steps", "Qw", cmap="cool", sharex=False)
Out[25]:
In [26]:
data.query("Qw < 0.4 and Qw > 0.2").mean()
Out[26]:
In [ ]: