In [1]:
from __future__ import print_function
import openpathsampling as paths
import numpy as np
import openpathsampling.engines.openmm as peng
In [2]:
# Pin numpy's float formatting so printed output matches the reference exactly.
np.set_printoptions(
    precision=6,
    formatter={'float_kind': '{:.6f}'.format},
)
Load first frame from test pdb.
In [3]:
template = peng.snapshot_from_pdb('../resources/AD_initial_frame.pdb')
Create a simple CV without an underlying function.
In [4]:
cv0 = paths.CollectiveVariable('func0')
Create a more complicated CV function that takes extra parameters beyond the snapshot.
In [5]:
# Offset subtracted inside the example collective variable.
center = 1

def dist(snapshot, center, np):
    """Return the coordinate sum of the snapshot's first atom minus ``center``.

    ``center`` and ``np`` are explicit parameters so the function carries its
    own dependencies; they are supplied as keyword arguments when the function
    is wrapped in a FunctionCV.
    """
    first_atom_sum = np.sum(snapshot.coordinates._value[0])
    return first_atom_sum - center
Create a collective variable from this function. Note that you have to specify `center` and `np` as keyword arguments to make this work.
In [6]:
# Three CVs wrapping dist(); each carries its own kwargs (center, np) and has
# a disk cache attached.  They differ only in the cv_time_reversible /
# cv_wrap_numpy_array flags (semantics per the OPS FunctionCV docs).
cv1 = paths.FunctionCV('func1', dist, center=center, np=np, cv_time_reversible=False).with_diskcache()
cv2 = paths.FunctionCV('func2', dist, center=center, np=np, cv_wrap_numpy_array=True).with_diskcache()
cv3 = paths.FunctionCV('func3', dist, center=center, np=np, cv_wrap_numpy_array=True, cv_time_reversible=True).with_diskcache()
Create storage to test save and load.
In [7]:
#! lazy
# Fresh scratch storage (the filename says it is safe to delete afterwards).
storage = paths.Storage('can_be_deleted.nc', mode='w')
# Save the template snapshot and show the value save() returns.
print(storage.snapshots.save(template))
Save CV
In [8]:
#! lazy
# Save all four CVs at once and show the returned values.
print(storage.save([cv0, cv1, cv2, cv3]))
In [9]:
#! lazy
# Show the CV store's index.
print(storage.cvs.index)
Set the CV value for the storage.template
In [10]:
cv0[template] = 10.0
and create a disk cache store for it
In [11]:
storage.cvs.add_diskcache(cv0, allow_incomplete=True)
And we should have a float store
In [12]:
# Write cv0's cached values to the storage.
storage.cvs.sync(cv0)
In [13]:
# The manually assigned value should now be in the on-disk value store.
assert(cv0._store_dict.value_store.vars['value'][:] == [10.0])
Test function for reversed template.
In [14]:
# Evaluate dist() on the reversed template snapshot and print the result
# with fixed precision for output comparison.
dd = dist(template.reversed, center, np)
print("{:.8f}".format(dd))
Now evaluate the CVs on lists of repeated snapshots.
In [15]:
# Evaluate the CVs on lists of snapshots.
print(cv0([template, template]))
print(cv1([template, template]))
In [16]:
# Compare the container and element types returned by the manual CV (cv0)
# and the function CV (cv1).
print(type(cv0([template, template])))
print(type(cv0([template, template])[0]))
print(type(cv1([template, template])))
print(type(cv1([template, template])[0]))
In [17]:
#! skip
print(storage.cvs.variables['json'][:])
In [18]:
# Reconstruct each CV from its stored JSON representation.
cv0j, cv1j, cv2j, cv3j = [storage.cvs.vars['json'][i] for i in range(4)]
In [19]:
# Each reconstructed CV must return identical values for identical snapshots.
for cvj in (cv0j, cv1j, cv2j, cv3j):
    res = cvj([template, template, template])
    assert res[0] == res[1] == res[2]
In [20]:
# A short trajectory made of four copies of the template snapshot.
t = paths.Trajectory([template] * 4)
In [21]:
#! lazy
print(storage.save(t))
In [22]:
def ff(t, cv3):
    """Return the maximum value of CV ``cv3`` over trajectory ``t``."""
    values = cv3(t)
    return max(values)
In [23]:
# A pseudo-attribute on trajectories: the maximum of cv3 along a trajectory.
a = paths.netcdfplus.FunctionPseudoAttribute('max_cv', paths.Trajectory, ff, cv3=cv3)
In [24]:
#! lazy
print(storage.attributes.save(a))
In [25]:
# Evaluate the attribute on the trajectory directly...
print("{:.8f}".format(a(t)))
In [26]:
# ...and on a lazy-loading proxy for the same trajectory; the printed
# values should match.
p = paths.netcdfplus.LoaderProxy(storage.trajectories, t.__uuid__)
In [27]:
print("{:.8f}".format(a(p)))
In [28]:
print(storage.trajectories.add_attribute(paths.netcdfplus.ValueStore, a, t, allow_incomplete=False))
In [29]:
# The value store just created for attribute `a`.
ats = storage.trajectories.attribute_list[a]
In [30]:
# Values as seen through the trajectory store...
print(ats.vars['value'][:])
In [31]:
# ...and through the attribute's own store dict; these should agree.
print(a._store_dict.value_store.vars['value'][:])
In [32]:
# Push cached attribute values to disk via the trajectory store.
storage.trajectories.sync_attribute(a)
In [33]:
# Check whether the attribute store has a cache for `a`.
storage.attributes.has_cache(a)
Out[33]:
In [34]:
# Sync via the attribute store as well.
storage.attributes.sync(a)
In [35]:
storage.close()
In [36]:
# Reopen read-only and make sure the attribute can be loaded back.
storage = paths.Storage('can_be_deleted.nc', mode='r')
In [37]:
# Load attribute #4 -- presumably the pseudo-attribute saved above, coming
# after the four CVs (indices 0-3); verify against storage contents.
storage.attributes[4]
Out[37]:
In [38]:
storage.close()
In [ ]: