In [1]:
import os
import sys
import random
import time
from random import seed, randint
import argparse
import platform
from datetime import datetime
import imp
import numpy as np
import fileinput
from itertools import product
import pandas as pd
from scipy.interpolate import griddata
from scipy.interpolate import interp2d
import seaborn as sns
from os import listdir
import glob
import matplotlib.pyplot as plt
import matplotlib as mpl
# sys.path.insert(0,'..')
# from notebookFunctions import *
# from .. import notebookFunctions
from Bio.PDB.Polypeptide import one_to_three
from Bio.PDB.Polypeptide import three_to_one
from Bio.PDB.PDBParser import PDBParser
from pyCodeLib import *
# from small_script.myFunctions import *
sys.path.insert(0, "/Users/weilu/openmmawsem")
from helperFunctions.myFunctions import *
from collections import defaultdict
%matplotlib inline
# plt.rcParams['figure.figsize'] = (10,6.180) #golden ratio
# %matplotlib notebook
%load_ext autoreload
%autoreload 2
In [2]:
plt.rcParams['figure.figsize'] = np.array([16.18033, 10]) #golden ratio
plt.rcParams['figure.facecolor'] = 'w'
plt.rcParams['figure.dpi'] = 100
plt.rcParams.update({'font.size': 22})
In [3]:
def mycp(source, target):
    os.system(f"cp {source} {target}")

do = os.system

# return the one-letter sequence of the standard residues in a pdb file
def getSeq(fileLocation):
    p = PDBParser()
    s = p.get_structure("test", fileLocation)
    seq = ""
    residues = list(s.get_residues())
    for residue in residues:
        res_id = residue.get_id()[0]
        if res_id == ' ':
            residue_name = residue.get_resname()
            seq += three_to_one(residue_name)
    return seq

# get chains and seq (chain id string and one-letter sequence, residue by residue)
def getChainsAndSeq(fileLocation):
    # fileLocation = "/Users/weilu/Research/examples/optimization/optimization/Structure_Ensemble/1.pdb"
    p = PDBParser()
    pdb = p.get_structure("test", fileLocation)
    residues = list(pdb.get_residues())
    seq = ""
    chains = ""
    for residue in residues:
        res_id = residue.get_id()[0]
        chain = residue.get_full_id()[2]
        if res_id == ' ':
            residue_name = residue.get_resname()
            seq += three_to_one(residue_name)
            chains += chain
    return chains, seq
In [21]:
# sanity check.
# I want to ensure every native pdb has the same seq.
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/1_pdb/1.pdb"
chains, seq1 = getChainsAndSeq(fileLocation)
In [8]:
print(len(chains))
In [22]:
seq1
Out[22]:
In [23]:
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/2_pdb/1.pdb"
chains, seq2 = getChainsAndSeq(fileLocation)
In [25]:
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/native_pdb/1.pdb"
chains, nativeseq1 = getChainsAndSeq(fileLocation)
In [26]:
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/native_pdb/2.pdb"
chains, nativeseq2 = getChainsAndSeq(fileLocation)
In [29]:
nativeseq1 == nativeseq2
Out[29]:
In [39]:
getSeq(fileLocation)
Out[39]:
In [24]:
seq2
Out[24]:
In [32]:
# all pdbs in native_pdb have the same seq.
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/native_pdb/1.pdb"
first_chains, first_seq = getChainsAndSeq(fileLocation)
a = list(range(1, 1001))
# for i in np.random.choice(a, 10):
for i in list(range(1, 90)):
    fileLocation = f"/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/native_pdb/{i}.pdb"
    chains, seq = getChainsAndSeq(fileLocation)
    assert chains == first_chains
    if seq == first_seq:
        pass
    else:
        print(i, seq, first_seq)
In [16]:
np.random.choice(a, 10)
Out[16]:
In [34]:
# every pdb with the same name under different folders should have the same sequence.
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/1_pdb/1.pdb"
first_chains, first_seq = getChainsAndSeq(fileLocation)
a = list(range(1, 91))
for i in np.random.choice(a, 10):
    fileLocation = f"/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/{i}_pdb/1.pdb"
    chains, seq = getChainsAndSeq(fileLocation)
    assert chains == first_chains
    if seq == first_seq:
        pass
    else:
        print(i, seq, first_seq)
In [36]:
# none should have the same seq as the native
fileLocation = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/native_pdb/1.pdb"
first_chains, first_seq = getChainsAndSeq(fileLocation)
a = list(range(1, 91))
for i in list(range(1, 1001)):
    if i % 50 == 0:
        print(i)
    fileLocation = f"/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/1_pdb/{i}.pdb"
    chains, seq = getChainsAndSeq(fileLocation)
    assert chains == first_chains
    if seq != first_seq:
        pass
    else:
        print(i, seq, first_seq)
In [38]:
pre = "/Users/weilu/Research/server/nov_2019/organize_optimization/quick_specific_decoys"
do(f"mkdir -p {pre}")
do(f"mkdir -p {pre}/database/dompdb")
do(f"mkdir -p {pre}/database/S20_seq")
do(f"mkdir -p {pre}/phis")
do(f"mkdir -p {pre}/optimization/proteins_name_list")
do(f"mkdir -p {pre}/optimization/slurms")
do(f"mkdir -p {pre}/optimization/gammas")
do(f"mkdir -p {pre}/optimization/outs")
Out[38]:
In [47]:
folder = "/Users/weilu/Research/server/nov_2019/organize_optimization/8_Rosetta_PDB/"
pdb = "1"
source = f"{folder}/native_pdb/{pdb}.pdb"
target = f"{pre}/database/dompdb/{pdb}.pdb"
mycp(source, target)
seq = getSeq(target)
fileLocation = f"{pre}/database/S20_seq/{pdb}.seq"
with open(fileLocation, "w") as out:
    out.write(seq+"\n")
In [48]:
do(f"mkdir -p {pre}/optimization/decoys/rosetta")
Out[48]:
In [82]:
p = PDBParser()
n_decoys = 10
for idx in range(1, 3):
    structures = []
    name_list = []
    for i in range(1, n_decoys+1):
        decoy_file_name = f"{i}_pdb/{idx}.pdb"
        fileLocation = f"{folder}/{decoy_file_name}"
        pdb_structure = p.get_structure("x", fileLocation)
        structures.append(pdb_structure)
        name_list.append(decoy_file_name)
    a = pd.DataFrame({"Name": name_list})
    a["structure"] = structures
    a.to_pickle(f"{pre}/optimization/decoys/rosetta/d{idx}.pkl")
In [78]:
b = pd.read_pickle(f"{pre}/optimization/decoys/rosetta/d1.pkl")
In [83]:
protein_list = [f"p_{i}" for i in range(1, 101)]
In [84]:
fileLocation = f"/Users/weilu/Research/server/nov_2019/optimization_with_specific_decoys/optimization/protein_list"
with open(fileLocation, "w") as out:
    for pdbName in protein_list:
        out.write(f"{pdbName}\n")
In [ ]:
pdbFolderList = glob.glob("/Users/weilu/Dropbox/Optimization_Xfunnel/Structure_Ensemble_*")
In [4]:
pdbFolderList = glob.glob("/Users/weilu/Research/server/oct_2019/Database_Optimization/*")
In [5]:
len(pdbFolderList)
Out[5]:
In [6]:
# filter out those that have an incomplete peptide.txt
_all = []
# generate decoys
protein_list = []
for pdbFolder in pdbFolderList:
    # pdbName = pdbFolder.split("_")[-1]
    pdbName = pdbFolder.split("/")[-1]
    source = pdbFolder + f"/*.pdb"
    p_list = glob.glob(source)
    # print(p_list, source)
    # assert len(p_list) == 1
    p = p_list[0]
    chain_seq, seq = getChainsAndSeq(p)
    # print(pdbName, len(seq), len(chain_seq))
    decoy_list = []
    if not os.path.exists(f"{pdbFolder}/peptide.txt"):
        print(pdbFolder, "does not exist")
        continue
    with open(f"{pdbFolder}/peptide.txt") as f:
        for line in f:
            pep = line.strip()
            pep_len = len(pep)
            # assert len(pep) == 9
            for c in list(set(chain_seq)):
                if chain_seq.count(c) == pep_len:
                    first_c = chain_seq.find(c)
                    a = list(seq)
                    a[first_c:first_c+pep_len] = pep
                    assert len(set(chain_seq[first_c:first_c+pep_len])) == 1
                    decoy = "".join(a)
                    decoy_list.append(decoy)
    _all.append([pdbName, len(decoy_list)])
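The substitution pattern above (find the chain whose length matches the peptide, then splice the peptide into that span of the full sequence) recurs in several cells below; a minimal helper sketch of the same logic (the name make_peptide_decoys is introduced here for illustration only and is not used elsewhere in the notebook):

def make_peptide_decoys(chain_seq, seq, peptide_file):
    # splice each peptide into the chain whose length equals the peptide length
    decoys = []
    with open(peptide_file) as f:
        for line in f:
            pep = line.strip()
            for c in set(chain_seq):
                if chain_seq.count(c) == len(pep):
                    start = chain_seq.find(c)
                    s = list(seq)
                    s[start:start+len(pep)] = pep
                    decoys.append("".join(s))
    return decoys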
In [7]:
a = pd.DataFrame(_all, columns=["Name", "Length"])
a.query("Length < 1000").sort_values("Name")
Out[7]:
In [8]:
pdbList = list(a.query("Length == 1000")["Name"])
In [9]:
len(pdbFolderList)
Out[9]:
In [10]:
len(pdbList)
Out[10]:
In [31]:
f = open("/Users/weilu/Research/server/oct_2019/peptide_optimization_trial_12/optimization/protein_list", 'r')
x = f.readlines()
f.close()
In [32]:
x = list(set(["_".join(a.strip().split("_")[:-1]) for a in x]))
In [34]:
len(x)
Out[34]:
In [35]:
pdbList = x
In [38]:
# pre = "/Users/weilu/Research/server/sep_2019/peptide_optimization_trial_8_larger_set"
pre = "/Users/weilu/Research/server/oct_2019//peptide_optimization_trial_12_revmove_9_cases_10_per_protein"
do(f"mkdir -p {pre}")
do(f"mkdir -p {pre}/database/dompdb")
do(f"mkdir -p {pre}/database/S20_seq")
do(f"mkdir -p {pre}/optimization/decoys/shuffle")
# generate decoys
protein_list = []
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("/")[-1]
    if pdbName not in pdbList:
        continue
    source = pdbFolder + f"/*.pdb"
    p_list = glob.glob(source)
    # print(p_list, source)
    # assert len(p_list) == 1
    p = p_list[0]
    chain_seq, seq = getChainsAndSeq(p)
    # print(pdbName, len(seq), len(chain_seq))
    decoy_list = []
    with open(f"{pdbFolder}/native.txt") as f:
        native = f.readlines()[0].strip()
    pep_len = len(native)
    # assert len(pep) == 9
    for c in list(set(chain_seq)):
        if chain_seq.count(c) == pep_len:
            first_c = chain_seq.find(c)
            a = list(seq)
            a[first_c:first_c+pep_len] = native
            assert len(set(chain_seq[first_c:first_c+pep_len])) == 1
            native_seq = "".join(a)
    with open(f"{pdbFolder}/peptide.txt") as f:
        for line in f:
            pep = line.strip()
            pep_len = len(pep)
            # assert len(pep) == 9
            for c in list(set(chain_seq)):
                if chain_seq.count(c) == pep_len:
                    first_c = chain_seq.find(c)
                    a = list(seq)
                    a[first_c:first_c+pep_len] = pep
                    assert len(set(chain_seq[first_c:first_c+pep_len])) == 1
                    decoy = "".join(a)
                    decoy_list.append(decoy)
    # two branches; branch one: all decoys go into one decoys file per structure.
    if len(decoy_list) == 0:
        continue
    assert len(decoy_list) == 1000
    for i in range(10):
        # idx = i*22 + 1
        idx = i*9 + 1
        p = os.path.dirname(p_list[0]) + f"/{idx}.pdb"
        opt_pdbName = f"{pdbName}_{idx}"
        fileLocation = f"{pre}/optimization/decoys/shuffle/{opt_pdbName}.decoys"
        # print("1", pdbName, len(decoy_list), pep_len)
        with open(fileLocation, "w") as out:
            # if len(decoy_list) != 1000:
            #     print("wrong")
            #     decoy_list = random.choices(decoy_list, k=1000)
            for decoy in decoy_list:
                out.write(decoy+"\n")
        protein_list.append(opt_pdbName)
        target = f"{pre}/database/dompdb/{opt_pdbName}.pdb"
        ## move native pdbs to dompdb
        mycp(p, target)
        ## move native seq to S20_seq
        # seq = getSeq(target)
        seq = native_seq
        fileLocation = f"{pre}/database/S20_seq/{opt_pdbName}.seq"
        with open(fileLocation, "w") as out:
            out.write(seq+"\n")
In [39]:
do(f"mkdir -p {pre}/optimization")
do(f"mkdir -p {pre}/phis")
## write protein_list
fileLocation = f"{pre}/optimization/protein_list"
with open(fileLocation, "w") as out:
    for pdbName in protein_list:
        out.write(f"{pdbName}\n")
# do(f"cp ~/opt/optimization/phi_list_contact.txt {pre}/optimization/phi_list.txt")
do(f"cp ~/opt/optimization/phi_list_debye.txt {pre}/optimization/phi_list.txt")
Out[39]:
In [48]:
# get unfinished job.
# pre = "/Users/weilu/Research/server/oct_2019/membrane_optimization"
cmd = f"grep 'CANCELLED' {pre}/optimization/outs/slurm-*"
has_cancelled = getFromTerminal(cmd)
aa = list(set([b.split(":")[0] for b in has_cancelled.split("\n")]))
# print(aa)
skip_pdbs = []
for a in aa:
    cmd = f"grep 'AWSEM' {a}"
    c = getFromTerminal(cmd)
    if c == '':
        print(a)
        continue
    # pdb = c
    pdb = c.split("'")[1]
    skip_pdbs.append(pdb)
    # print(pdb)
print(len(skip_pdbs))
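getFromTerminal is imported from helperFunctions.myFunctions via the star import above; a minimal stand-in, assuming it simply returns the command's stdout as a string (a sketch, not the library's actual code):

import subprocess

def getFromTerminal_sketch(cmd):
    # run the shell command and return whatever it prints to stdout
    return subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout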
In [50]:
fileLocation = f"{pre}/optimization/skip_pdbs_protein_list"
with open(fileLocation, "w") as out:
    for pdbName in skip_pdbs:
        out.write(f"{pdbName}\n")
In [16]:
# pre = "/Users/weilu/Research/server/sep_2019/peptide_optimization_trial_8_larger_set"
pre = "/Users/weilu/Research/server/oct_2019//peptide_optimization_trial_10"
do(f"mkdir -p {pre}")
do(f"mkdir -p {pre}/database/dompdb")
do(f"mkdir -p {pre}/database/S20_seq")
do(f"mkdir -p {pre}/optimization/decoys/shuffle")
# generate decoys
protein_list = []
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("/")[-1]
    if pdbName not in pdbList:
        continue
    source = pdbFolder + f"/*.pdb"
    p_list = glob.glob(source)
    # print(p_list, source)
    assert len(p_list) == 1
    p = p_list[0]
    chain_seq, seq = getChainsAndSeq(p)
    # print(pdbName, len(seq), len(chain_seq))
    decoy_list = []
    with open(f"{pdbFolder}/peptide.txt") as f:
        for line in f:
            pep = line.strip()
            pep_len = len(pep)
            # assert len(pep) == 9
            for c in list(set(chain_seq)):
                if chain_seq.count(c) == pep_len:
                    first_c = chain_seq.find(c)
                    a = list(seq)
                    a[first_c:first_c+pep_len] = pep
                    assert len(set(chain_seq[first_c:first_c+pep_len])) == 1
                    decoy = "".join(a)
                    decoy_list.append(decoy)
    # two branches; branch one: all decoys go into one decoys file.
    if len(decoy_list) == 0:
        continue
    opt_pdbName = f"{pdbName}"
    fileLocation = f"{pre}/optimization/decoys/shuffle/{opt_pdbName}.decoys"
    # print("1", pdbName, len(decoy_list), pep_len)
    with open(fileLocation, "w") as out:
        if len(decoy_list) != 1000:
            print("wrong")
            decoy_list = random.choices(decoy_list, k=1000)
        for decoy in decoy_list:
            out.write(decoy+"\n")
    protein_list.append(opt_pdbName)
    target = f"{pre}/database/dompdb/{opt_pdbName}.pdb"
    ## move native pdbs to dompdb
    mycp(p, target)
    ## move native seq to S20_seq
    seq = getSeq(target)
    fileLocation = f"{pre}/database/S20_seq/{opt_pdbName}.seq"
    with open(fileLocation, "w") as out:
        out.write(seq+"\n")
# # two branch. one: set as 100 per structure.
# n = int(len(decoy_list) // 100)
# for i in range(n):
# opt_pdbName = f"{pdbName}_{i}"
# fileLocation = f"{pre}/optimization/decoys/shuffle/{opt_pdbName}.decoys"
# # print("1", pdbName, len(decoy_list), pep_len)
# with open(fileLocation, "w") as out:
# for decoy in decoy_list[i*100:(i+1)*100]:
# out.write(decoy+"\n")
# protein_list.append(opt_pdbName)
# target = f"{pre}/database/dompdb/{opt_pdbName}.pdb"
# ## move native pdbs to dompdb
# mycp(p, target)
# ## move native seq to S20_seq
# seq = getSeq(target)
# fileLocation = f"{pre}/database/S20_seq/{opt_pdbName}.seq"
# with open(fileLocation, "w") as out:
# out.write(seq+"\n")
# fileLocation = f"{pre}/optimization/decoys/shuffle/{pdbName}.decoys"
# # if len(decoy_list) == 0:
# # print(pdbName)
# print("1", pdbName, len(decoy_list), pep_len)
# with open(fileLocation, "w") as out:
# for decoy in decoy_list:
# out.write(decoy+"\n")
In [21]:
len(protein_list)
Out[21]:
In [22]:
do(f"mkdir -p {pre}/optimization")
do(f"mkdir -p {pre}/phis")
## write protein_list
fileLocation = f"{pre}/optimization/protein_list"
with open(fileLocation, "w") as out:
    for pdbName in protein_list:
        out.write(f"{pdbName}\n")
do(f"cp ~/opt/optimization/phi_list_contact.txt {pre}/optimization/phi_list.txt")
Out[22]:
In [80]:
df = pd.DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
d1 = df.assign(
    A_cut=pd.cut(df.A, 2),
    B_cut=pd.cut(df.B, 2)
)
d1["count"] = 1
d2 = d1[["A_cut", "B_cut", "count"]].groupby(["A_cut", "B_cut"]).sum().reset_index()
d2["x"] = d2["A_cut"].apply(lambda x: x.mid)
d2["y"] = d2["B_cut"].apply(lambda x: x.mid)
d3 = d2[["x", "y", "count"]].reset_index()
d3["z"] = d3["count"]/ d3["count"].sum()
d3["logz"] = np.log(d3["z"])
In [ ]:
# pre = "/Users/weilu/Research/server/sep_2019/peptide_optimization_specific_test"
pre = "/Users/weilu/Research/server/sep_2019/peptide_optimization_trial_6"
do(f"mkdir -p {pre}/database/dompdb")
do(f"mkdir -p {pre}/database/S20_seq")
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("_")[-1]
    source = pdbFolder + f"/*.pdb"
    p_list = glob.glob(source)
    if len(p_list) > 1:
        print(p_list)
        continue
    if len(p_list) == 0:
        print("no", pdbName)
    for p in p_list:
        target = f"{pre}/database/dompdb/{pdbName}.pdb"
        ## move native pdbs to dompdb
        mycp(p, target)
        ## move native seq to S20_seq
        seq = getSeq(target)
        fileLocation = f"{pre}/database/S20_seq/{pdbName}.seq"
        with open(fileLocation, "w") as out:
            out.write(seq+"\n")
In [82]:
_all = []
# generate decoys
protein_list = []
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("_")[-1]
    source = pdbFolder + f"/*.pdb"
    p_list = glob.glob(source)
    # print(p_list, source)
    assert len(p_list) == 1
    p = p_list[0]
    chain_seq, seq = getChainsAndSeq(p)
    # print(pdbName, len(seq), len(chain_seq))
    decoy_list = []
    with open(f"{pdbFolder}/peptide.txt") as f:
        for line in f:
            pep = line.strip()
            pep_len = len(pep)
            # assert len(pep) == 9
            for c in list(set(chain_seq)):
                if chain_seq.count(c) == pep_len:
                    first_c = chain_seq.find(c)
                    a = list(seq)
                    a[first_c:first_c+pep_len] = pep
                    assert len(set(chain_seq[first_c:first_c+pep_len])) == 1
                    decoy = "".join(a)
                    decoy_list.append(decoy)
    _all.append([pdbName, len(decoy_list)])
In [92]:
a = pd.DataFrame(_all, columns=["Name", "Length"])
a.query("Length < 300 and Length != 0").sort_values("Name")
Out[92]:
In [29]:
do(f"mkdir -p {pre}/optimization/decoys/shuffle")
# generate decoys
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("_")[-1]
    source = pdbFolder + f"/*ab.pdb"
    p_list = glob.glob(source)
    if pdbName != "3D39":
        continue
    # print(p_list, source)
    assert len(p_list) == 1
    chain_seq, seq = getChainsAndSeq(p_list[0])
    print(pdbName, len(seq), len(chain_seq))
    decoy_list = []
    decoy = ""
    with open(f"{pdbFolder}/peptide.txt") as f:
        for line in f:
            pep = line.strip()
            assert len(pep) == 9
            for c in list(set(chain_seq)):
                if chain_seq.count(c) == 9:
                    first_c = chain_seq.find(c)
                    a = list(seq)
                    a[first_c:first_c+9] = pep
                    decoy = "".join(a)
                    decoy_list.append(decoy)
    fileLocation = f"{pre}/optimization/decoys/shuffle/{pdbName}.decoys"
    with open(fileLocation, "w") as out:
        for decoy in decoy_list:
            out.write(decoy+"\n")
In [31]:
list(set(chain_seq))
Out[31]:
In [32]:
chain_seq.count("Y")
Out[32]:
In [ ]:
do(f"mkdir -p {pre}/optimization")
## write protein_list
fileLocation = f"{pre}/optimization/protein_list"
with open(fileLocation, "w") as out:
    for pdbFolder in pdbFolderList:
        pdbName = pdbFolder.split("_")[-1]
        out.write(f"{pdbName}\n")
In [205]:
## write protein_list
fileLocation = f"{pre}/optimization/protein_list"
with open(fileLocation, "w") as out:
    for pdbFolder in pdbFolderList:
        pdbName = pdbFolder.split("_")[-1]
        for i in range(1, 6):
            out.write(f"{pdbName}_{i}\n")
# generate decoys
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("_")[-1]
    for i in range(1, 6):
        # source = pdbFolder + f"/{i}.pdb"  # overridden by the *ab.pdb pattern below
        source = pdbFolder + f"/*ab.pdb"
        p_list = glob.glob(source)
        assert len(p_list) == 1
        chain_seq, seq = getChainsAndSeq(p_list[0])
        print(pdbName, i, len(seq), len(chain_seq))
        decoy_list = []
        with open(f"{pdbFolder}/peptide.txt") as f:
            for line in f:
                pep = line.strip()
                assert len(pep) == 9
                for c in list(set(chain_seq)):
                    if chain_seq.count(c) == 9:
                        first_c = chain_seq.find(c)
                        a = list(seq)
                        a[first_c:first_c+9] = pep
                        decoy = "".join(a)
                        decoy_list.append(decoy)
        fileLocation = f"{pre}/optimization/decoys/shuffle/{pdbName}_{i}.decoys"
        with open(fileLocation, "w") as out:
            for decoy in decoy_list:
                out.write(decoy+"\n")
In [168]:
# ensure they are all 9-residue peptides.
for pdbFolder in pdbFolderList:
    pdbName = pdbFolder.split("_")[-1]
    # print(pdbName)
    with open(f"{pdbFolder}/peptide.txt") as f:
        for line in f:
            pep = line.strip()
            assert len(pep) == 9
In [160]:
fileLocation
Out[160]:
In [164]:
for c in "ABCDEFG":
    if chain_seq.count(c) == 9:
        first_c = chain_seq.find(c)
        a = list(seq)
        a[first_c:first_c+9] = pep
        decoy = "".join(a)
        decoy_list.append(decoy)
Out[164]:
In [3]:
# get seq
fileLocation = "/Users/weilu/Research/examples/optimization/optimization/Structure_Ensemble/1.pdb"
p = PDBParser()
pdb = p.get_structure("test", fileLocation)
residues = list(pdb.get_residues())
seq = ""
chains = ""
for residue in residues:
    res_id = residue.get_id()[0]
    chain = residue.get_full_id()[2]
    if res_id == ' ':
        residue_name = residue.get_resname()
        seq += three_to_one(residue_name)
        chains += chain
In [5]:
# get decoy
decoy_list = []
with open("/Users/weilu/Research/examples/optimization/optimization/Structure_Ensemble/peptide.txt") as f:
for line in f:
pep = line.strip()
assert len(pep) == 9
a = list(seq)
a[180:189] = pep
decoy = "".join(a)
decoy_list.append(decoy)
In [38]:
for i in range(1, 91):
    pre = "/Users/weilu/Research/server/sep_2019/peptide_optimization"
    fileLocation = f"{pre}/database/S20_seq/{i}.seq"
    with open(fileLocation, "w") as out:
        out.write(seq+"\n")
In [40]:
for i in range(1, 91):
    fileLocation = f"{pre}/optimization/decoys/shuffle/{i}.decoys"
    with open(fileLocation, "w") as out:
        for decoy in decoy_list:
            out.write(decoy+"\n")
In [42]:
with open("/Users/weilu/Research/server/sep_2019/peptide_optimization/optimization/protein_list", "w") as out:
for i in range(1, 91):
out.write(f"{i}\n")
In [41]:
for i in range(1, 91):
    os.system(f"cp /Users/weilu/Research/examples/optimization/optimization/Structure_Ensemble/{i}.pdb /Users/weilu/Research/server/sep_2019/peptide_optimization/database/dompdb/")
In [39]:
len(decoy_list)
Out[39]:
In [6]:
seq[180:189]
Out[6]:
In [49]:
a = list(seq)
a[180:189] = list('FIFLLFLTL')
In [26]:
all_seq = []
for i in range(1, 91):
    fileLocation = f"/Users/weilu/Research/examples/optimization/optimization/Structure_Ensemble/{i}.pdb"
    seq = getSeq(fileLocation)
    assert len(seq) == 414
    all_seq.append(seq)
    # assert preSeq == seq
    # preSeq = seq
In [27]:
all_seq[0]
Out[27]:
In [28]:
all_seq[0] == all_seq[1]
Out[28]:
In [29]:
for i in range(90):
    if all_seq[i] != all_seq[0]:
        print(i)
In [9]:
def getAllFrames(movieLocation):
    # movieLocation = "/Users/weilu/Research/examples/openMM_simulation/test_2/movie.pdb"
    location = movieLocation
    with open(location) as f:
        a = f.readlines()
    n = len(a)
    # get the position of every MODEL record
    model_title_index_list = []
    for i in range(n):
        if len(a[i]) >= 5 and a[i][:5] == "MODEL":
            model_title_index = i
            model_title_index_list.append(model_title_index)
    model_title_index_list.append(n)
    check_array = np.diff(model_title_index_list)
    if np.allclose(check_array, check_array[0]):
        size = check_array[0]
    elif np.allclose(check_array[:-1], check_array[0]) and check_array[-1] == check_array[0] + 1:
        # this is ok: the last frame just has an extra "END" line
        size = check_array[0]
    else:
        print("!!!! Something is wrong !!!!")
        print(check_array)
    return a
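The next cells use a, n, and size directly; since getAllFrames as written only returns the raw lines, here is a hedged usage sketch (the movie path is the one from the comment inside the function) that recovers n and size the same way the function does:

movieLocation = "/Users/weilu/Research/examples/openMM_simulation/test_2/movie.pdb"
a = getAllFrames(movieLocation)
n = len(a)
# recompute the per-frame line count from the MODEL records, mirroring the function
model_starts = [i for i, line in enumerate(a) if line.startswith("MODEL")]
size = model_starts[1] - model_starts[0]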
In [10]:
num_of_frames = int(n/size)
In [11]:
frame = 5
oneFrame = a[size*frame:size*(frame+1)]
In [60]:
frame = num_of_frames
oneFrame = a[size*frame:size*(frame+1)]
In [61]:
oneFrame
Out[61]:
In [45]:
# s = p.get_structure("test", f)
# residues = list(s.get_residues())
In [24]:
import io
f = io.StringIO("".join(oneFrame))
MAX_OFFSET=4
DISTANCE_CUTOFF=9.5
s = p.get_structure("test", f)
chains = s[0].get_list()
# import pdb file
native_coords = []
for chain in chains:
    dis = []
    all_res = []
    for res in chain:
        is_regular_res = res.has_id('CA') and res.has_id('O')
        res_id = res.get_id()[0]
        if (res.get_resname() == 'GLY'):
            native_coords.append(res['CA'].get_coord())
        elif (res_id == ' ' or res_id == 'H_MSE' or res_id == 'H_M3L' or res_id == 'H_CAS') and is_regular_res:
            native_coords.append(res['CB'].get_coord())
        else:
            print('ERROR: irregular residue at %s!' % res)
            exit()
native_contacts_table = compute_native_contacts(native_coords, MAX_OFFSET, DISTANCE_CUTOFF)
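compute_native_contacts comes from pyCodeLib; the sketch below is a plain-NumPy stand-in under the assumption that it marks residue pairs at least MAX_OFFSET apart in sequence whose CB (CA for GLY) coordinates lie within DISTANCE_CUTOFF. It is not pyCodeLib's actual implementation:

def compute_native_contacts_sketch(coords, max_offset, distance_cutoff):
    # pairwise distances between the chosen CB/CA coordinates
    coords = np.array(coords)
    n_res = len(coords)
    dists = np.linalg.norm(coords[:, None, :] - coords[None, :, :], axis=-1)
    contacts = np.zeros((n_res, n_res), dtype=bool)
    for i in range(n_res):
        for j in range(i + max_offset, n_res):
            if dists[i, j] < distance_cutoff:
                contacts[i, j] = contacts[j, i] = True
    return contacts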
In [25]:
native_contacts_table
Out[25]:
In [62]:
plt.imshow(native_contacts_table, origin="lower")
Out[62]:
In [27]:
def get_contactFromDMP(fileLocation, n, threshold=0.2):
    a = np.zeros((n, n))
    c_list = []
    with open(fileLocation, "r") as f:
        # for i in range(9):
        #     next(f)
        for line in f:
            # print(line)
            try:
                i, j, _, _, _, p = line.split(" ")
                # print(i, j, p)
                a[int(i)-1, int(j)-1] = float(p)
                a[int(j)-1, int(i)-1] = float(p)
                if float(p) > threshold:
                    c_list.append([int(i), int(j), float(p)])
            except Exception as e:
                print(e)
                pass
    return a, np.array(c_list)
name = "cannabinoid_receptor"
n = 472
# fileLocation = f"/Users/weilu/Research/server/oct_2019/draw_contact_for_DMP/{name}.deepmetapsicov.con"
# name = "serotonin_1A_receptor"
# n = 422
# fileLocation = f"/Users/weilu/Research/server/oct_2019/GPCRs_reorder/simulation_setups/DMP/{name}/{name}.deepmetapsicov.con"
fileLocation = f"/Users/weilu/opt/gremlin/protein/{name}/DMP/{name}.deepmetapsicov.con"
a, _ = get_contactFromDMP(fileLocation, n)
In [33]:
t_s = a.astype(float)
plt.imshow(np.log(t_s), origin="lower", cmap="Greys")
plt.colorbar()
Out[33]: