Aim

Given a large set of sequences (or graphs with ordered vertices), find small vertex-ordered subsequences that are most discriminative for the set.

Steps:

  • devise a negative set
  • learn a discriminative model
  • annotate each vertex with an importance score
  • extract the maximal subarrays
  • cluster them
    • use the fast EDeN string kernel
    • apply a clustering algorithm

Output:

  1. all sequence motifs in each cluster
  2. all input sequences annotated with the motif location (begin, end) and cluster id (build a regex from all motif sequences in a cluster and run a find iterator; see the sketch below)
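
A minimal sketch of output 2, assuming the motives_db layout shown later in this notebook (cluster id mapped to a list of (count, motif) pairs) and seqs given as a list of (header, sequence) pairs; the helper name motif_locations, the longest-first ordering and the use of re.escape are illustrative choices, not part of EDeN:

import re

def motif_locations(seqs, motives_db):
    #one alternation regex per cluster; longer motifs first so they win ties
    patterns = {}
    for cluster_id, motives in motives_db.items():
        patterns[cluster_id] = '|'.join(sorted((re.escape(m) for c, m in motives), key=len, reverse=True))
    #yield one record per match: header, begin, end, cluster id
    for header, seq in seqs:
        for cluster_id, pattern in patterns.items():
            for match in re.finditer(pattern, seq):
                yield header, match.start(), match.end(), cluster_id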

In [1]:
%load_ext autoreload
%autoreload 2

In [2]:
#code for making an artificial dataset
import random

def random_string(length,alphabet_list):
    #uniform random string of the given length over the alphabet
    rand_str = ''.join(random.choice(alphabet_list) for i in range(length))
    return rand_str

def perturb(seed,alphabet_list,p=0.5):
    #replace each character of the seed with a random symbol with probability p
    seq=''
    for c in seed:
        if random.random() < p: c = random.choice(alphabet_list)
        seq += c
    return seq

def make_artificial_dataset(alphabet='ACGU', motives=None, motif_length=6, sequence_length=100, n_sequences=1000, n_motives=2, p=0.2):
    alphabet_list=[c for c in alphabet]

    #either generate random motifs or derive motif_length and n_motives from the given ones
    if motives is None:
        motives=[]
        for i in range(n_motives):
            motives.append(random_string(motif_length,alphabet_list))
    else:
        motif_length = len(motives[0])
        n_motives = len(motives)

    #integer division (Python 2)
    flanking_length = (sequence_length - motif_length) / 2
    n_seq_per_motif = n_sequences / n_motives

    #each sequence embeds one perturbed motif between two random flanking regions
    counter=0
    seqs=[]
    for i in range(n_seq_per_motif):
        for j in range(n_motives):
            left_flanking = random_string(flanking_length,alphabet_list)
            right_flanking = random_string(flanking_length,alphabet_list)
            noisy_motif = perturb(motives[j],alphabet_list,p)
            seq = left_flanking + noisy_motif + right_flanking
            seqs.append(('>ID%d'%counter,seq))
            counter += 1
    return motives, seqs

In [3]:
from eden.motif import SequenceMotif
help(SequenceMotif)


Help on class SequenceMotif in module eden.motif:

class SequenceMotif(__builtin__.object)
 |  Methods defined here:
 |  
 |  __init__(self, min_subarray_size=7, max_subarray_size=10, min_motif_count=1, min_cluster_size=1, training_size=None, negative_ratio=2, shuffle_order=2, n_iter_search=1, complexity=4, nbits=20, clustering_algorithm=None, n_jobs=4, n_blocks=8, block_size=None, pre_processor_n_jobs=4, pre_processor_n_blocks=8, pre_processor_block_size=None, random_state=1)
 |  
 |  fit(self, seqs, neg_seqs=None)
 |      Builds a discriminative estimator. 
 |      Identifies the maximal subarrays in the data. 
 |      Clusters them with the clustering algorithm provided in the initialization phase.
 |      For each cluster builds a fast sequence search model (Aho Corasick data structure).
 |  
 |  fit_predict(self, seqs, return_list=False)
 |  
 |  fit_transform(self, seqs, return_match=False)
 |  
 |  load(self, obj)
 |  
 |  predict(self, seqs, return_list=False)
 |      Returns for each instance a list with the cluster ids that have a hit
 |      if  return_list=False then just return 1 if there is at least one hit from one cluster.
 |  
 |  save(self, model_name)
 |  
 |  transform(self, seqs, return_match=False)
 |      Transform an instance to a dense vector with features as cluster ID and entries 0/1 if a motif is found,
 |      if 'return_match' argument is True, then write a pair with (start position,end position)  in the entry instead of 0/1
 |  
 |  ----------------------------------------------------------------------
 |  Data descriptors defined here:
 |  
 |  __dict__
 |      dictionary for instance variables (if defined)
 |  
 |  __weakref__
 |      list of weak references to the object (if defined)
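
A minimal usage sketch of the API above (the choice of clustering algorithm and all parameter values here are arbitrary; the cells below run the same workflow on an artificial dataset):

from sklearn.cluster import MiniBatchKMeans
from eden.motif import SequenceMotif

#toy data from the generator defined above
_, toy_seqs = make_artificial_dataset(n_sequences=100, n_motives=2)
seqmot = SequenceMotif(clustering_algorithm=MiniBatchKMeans(n_clusters=2))
seqmot.fit(toy_seqs)                                 #neg_seqs is optional (cf. In [21] below)
hits = seqmot.predict(toy_seqs, return_list=True)    #per sequence: cluster ids with at least one hit
vecs = seqmot.transform(toy_seqs, return_match=True) #per sequence, per cluster: list of (start, end) matches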

Experimental Setup


In [4]:
#setup parameters
alphabet='ACGU'
motives=['AAAAAAAAAA','CCCCCCCCCC','GGGGGGGGGG','UUUUUUUUUU']
sequence_length=100
n_sequences=100
p=0.3

#make dataset
motives, seqs = make_artificial_dataset(alphabet=alphabet,motives=motives,sequence_length=sequence_length,n_sequences=n_sequences,p=p)

#display
print 'Motives and sample of their perturbed variants:'
alphabet_list=[c for c in alphabet]
for motif in motives: 
    print
    print motif,
    for i in range(9):
        print perturb(motif,alphabet_list,p=p),


Motives and sample of their perturbed variants:

AAAAAAAAAA AAAAACAUAA AAAAGAAAAA AAGAAAAAGA AAAAAAUAAA AAAAAAAAAA AAACGAAAAA AAAUAAAAAA AAGAAAAUAA UAUAAAAAAG
CCCCCCCCCC CCCCCCCCUU CCGCCCCCCU ACCCCCCUCC CCCCCCCCCC CCCCCCCCGC CCAGCCACCG CCGCCCCCAC ACCCCCCCCC CCCCCCCCCU
GGGGGGGGGG GGGCGGUGGU GGGGCAGAGG GGGGGGGCGA AGGGGGGGAC UUGGGGGAGG GCGUGGGGGG CGGGAGGAGG GUGGGGGGUG AGCGGGAGGU
UUUUUUUUUU GUUCUUUUUU UGGUUUCUUU AUUUCCUUUG UUUUUUUUUU UUUUUUUUUU UUUUCUUUCU UUUUUCUUAU UCUUUUUUAU UAUUUCGUUG

In [11]:
#save to file
fname='artificial_motif_search_dataset.fa'
with open(fname,'w') as f:
    for header,seq in seqs: 
        f.write(header+"\n")
        f.write(seq+"\n")

#save explicit negative sequences
from eden.modifier.seq import seq_to_seq, shuffle_modifier
neg_seqs = list(seq_to_seq(seqs, modifier=shuffle_modifier, times=2, order=2))
fname='artificial_motif_search_dataset_negatives.fa'
with open(fname,'w') as f:
    for header,seq in neg_seqs: 
        f.write(header+"\n")
        f.write(seq+"\n")

In [12]:
from eden.util import configure_logging
import logging
configure_logging(logging.getLogger(),verbosity=2)

In [15]:
%%time
from sklearn.cluster import Birch
ca = Birch(threshold=0.1, n_clusters=4, branching_factor=50)

from eden.motif import SequenceMotif
seqmot = SequenceMotif(training_size=100, complexity=2, nbits=14, clustering_algorithm=ca)
seqmot.fit(seqs, neg_seqs)
seqmot.save('seqmot')


model induction: 100 positive instances 4 secs
motives extraction: 67 motives 1 secs
motives clustering: 4 clusters 0 secs
after filtering: 66 motives 4 clusters 0 secs
motif model construction: 0 secs
CPU times: user 2.99 s, sys: 661 ms, total: 3.65 s
Wall time: 5.9 s

In [16]:
for cluster_id in seqmot.motives_db:
    print cluster_id
    for count, motif in sorted(seqmot.motives_db[cluster_id], reverse=True):
        print motif, count


0
UUGGGUGGGA 1
UUGCGGGUUG 1
UGGUUUCUUU 1
UGGGGGGGGC 1
UGGGGGCGGG 1
GGGUUCUUUA 1
GGGGUUUUU 1
GGGGUCAGUC 1
GGGGGGUUGG 1
GGGGGGGGU 1
GGGGGGGGGU 1
GGGGGGGGG 1
GGGGGGGCG 1
GGGGGGCCCC 1
GGGGCGGGU 1
GGGGAAAAAC 1
GGCGGGCGG 1
GCGCGGAGA 1
GAAGAUUUUG 1
GAAGAGGGA 1
CACUAGAUUU 1
AGGGGGGCC 1
AGAGUUGAGA 1
AGAGGGGGG 1
ACUGAGACG 1
1
UUUUUUUUUG 1
UUUUUUUGUU 1
UUUGUCACUA 1
UGGUUUUUUG 1
UCUUUUUUUA 1
GUUUUUUUUU 1
GUUUUUUUU 1
GUUUUUUUC 1
GUCUCUGUC 1
GGUUUUUAG 1
CUUUUUUUG 1
CUUUUAUCAG 1
CUUUGUUUUU 1
CUUUCUUUUA 1
2
UCCCCCCCG 1
GCCGCCCCC 1
GCCCCCCCAC 1
GAAACCCCA 1
CCCGCCGCC 1
CCCCCCCGCC 1
CCCCCCCCUC 1
CCCCCCCCG 1
CCCCCCCCCC 1
CCCCCCCACU 1
CCCCCACCCG 1
ACCGCCCCC 1
ACCCCCCGCC 1
3
AAAAAAAAAA 2
UGAGAAAAG 1
GUAUAAAAC 1
GAAAAAUAUA 1
GAAAAAAAAA 1
CUAGAAAAG 1
CGACAAAAC 1
CAAACAAAAA 1
AGAAAAAAAA 1
AAGACAAAA 1
AAACAAAAAC 1
AAAAGAUCAC 1
AAAACAAAAA 1
AAAAAAGAAA 1

In [17]:
from eden.motif import SequenceMotif
seqmot2 = SequenceMotif()
seqmot2.load('seqmot')

predictions=seqmot2.predict(seqs, return_list=True)
for p in predictions: print p


[]
[1, 2]
[1]
[1]
[]
[2]
[]
[0, 1]
[0, 3]
[0]
[0]
[]
[3]
[]
[]
[]
[0]
[2]
[]
[1]
[]
[]
[0]
[]
[0, 1, 3]
[]
[0]
[0, 1]
[3]
[2]
[0]
[1]
[]
[]
[0, 2]
[1]
[3]
[2]
[3]
[1]
[3]
[]
[]
[]
[3]
[]
[0]
[0]
[3]
[0]
[]
[]
[3]
[2]
[0]
[1]
[]
[]
[0]
[0, 3]
[3]
[2]
[0]
[2]
[3]
[]
[3]
[]
[]
[2]
[]
[0]
[]
[0]
[]
[]
[]
[1, 2]
[]
[]
[]
[2]
[0]
[1]
[3]
[3]
[]
[]
[3]
[2]
[]
[]
[]
[]
[0]
[1]
[]
[]
[0]
[1]

In [18]:
predictions=seqmot2.predict(seqs, return_list=False)
for p in predictions: print p


0
1
1
1
0
1
0
1
1
1
1
0
1
0
0
0
1
1
0
1
0
0
1
0
1
0
1
1
1
1
1
1
0
0
1
1
1
1
1
1
1
0
0
0
1
0
1
1
1
1
0
0
1
1
1
1
0
0
1
1
1
1
1
1
1
0
1
0
0
1
0
1
0
1
0
0
0
1
0
0
0
1
1
1
1
1
0
0
1
1
0
0
0
0
1
1
0
0
1
1

In [19]:
predictions=seqmot2.transform(seqs, return_match=True)
for p in predictions: print p


[[], [], [], []]
[[], [(0, 10)], [(43, 53)], []]
[[], [(72, 82)], [], []]
[[], [(44, 53), (44, 54)], [], []]
[[], [], [], []]
[[], [], [(45, 54)], []]
[[], [], [], []]
[[(16, 25)], [(43, 52)], [], []]
[[(72, 82)], [], [], [(45, 55)]]
[[(28, 38)], [], [], []]
[[(47, 56)], [], [], []]
[[], [], [], []]
[[], [], [], [(44, 54)]]
[[], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[(49, 58)], [], [], []]
[[], [], [(45, 54)], []]
[[], [], [], []]
[[], [(44, 53)], [], []]
[[], [], [], []]
[[], [], [], []]
[[(46, 56)], [], [], []]
[[], [], [], []]
[[(3, 12), (23, 32), (77, 86)], [(62, 71)], [], [(44, 53)]]
[[], [], [], []]
[[(48, 57)], [], [], []]
[[(18, 28)], [(44, 53)], [], []]
[[], [], [], [(44, 54), (45, 55)]]
[[], [], [(44, 53), (45, 55)], []]
[[(45, 54), (46, 55), (46, 56), (47, 56)], [], [], []]
[[], [(44, 54)], [], []]
[[], [], [], []]
[[], [], [], []]
[[(45, 54)], [], [(87, 96)], []]
[[], [(49, 58)], [], []]
[[], [], [], [(45, 55)]]
[[], [], [(44, 54)], []]
[[], [], [], [(6, 15)]]
[[], [(44, 54)], [], []]
[[], [], [], [(45, 55)]]
[[], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [], [(44, 54)]]
[[], [], [], []]
[[(45, 54)], [], [], []]
[[(43, 53), (79, 89)], [], [], []]
[[], [], [], [(49, 59)]]
[[(78, 88)], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [], [(44, 54)]]
[[], [], [(45, 55), (47, 56), (48, 58)], []]
[[(89, 99)], [], [], []]
[[], [(44, 54)], [], []]
[[], [], [], []]
[[], [], [], []]
[[(46, 55), (46, 56), (47, 56)], [], [], []]
[[(41, 51)], [], [], [(91, 100)]]
[[], [], [], [(43, 53), (44, 54), (45, 55)]]
[[], [], [(47, 57)], []]
[[(78, 88)], [], [], []]
[[], [], [(76, 85)], []]
[[], [], [], [(45, 55), (46, 56)]]
[[], [], [], []]
[[], [], [], [(68, 77)]]
[[], [], [], []]
[[], [], [], []]
[[], [], [(44, 54)], []]
[[], [], [], []]
[[(27, 37)], [], [], []]
[[], [], [], []]
[[(55, 65)], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[], [(4, 14)], [(44, 54)], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [(47, 56)], []]
[[(45, 55), (47, 56)], [], [], []]
[[], [(48, 57)], [], []]
[[], [], [], [(10, 19)]]
[[], [], [], [(25, 34)]]
[[], [], [], []]
[[], [], [], []]
[[], [], [], [(44, 54)]]
[[], [], [(44, 53)], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[], [], [], []]
[[(46, 56)], [], [], []]
[[], [(45, 55)], [], []]
[[], [], [], []]
[[], [], [], []]
[[(46, 55)], [], [], []]
[[], [(43, 53)], [], []]

In [20]:
predictions=seqmot2.transform(seqs, return_match=False)
for p in predictions: print p


[0, 0, 0, 0]
[0, 1, 1, 0]
[0, 1, 0, 0]
[0, 1, 0, 0]
[0, 0, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 0]
[1, 1, 0, 0]
[1, 0, 0, 1]
[1, 0, 0, 0]
[1, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 1]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[0, 0, 0, 0]
[1, 1, 0, 1]
[0, 0, 0, 0]
[1, 0, 0, 0]
[1, 1, 0, 0]
[0, 0, 0, 1]
[0, 0, 1, 0]
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 1, 0]
[0, 1, 0, 0]
[0, 0, 0, 1]
[0, 0, 1, 0]
[0, 0, 0, 1]
[0, 1, 0, 0]
[0, 0, 0, 1]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 1]
[0, 0, 0, 0]
[1, 0, 0, 0]
[1, 0, 0, 0]
[0, 0, 0, 1]
[1, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 1]
[0, 0, 1, 0]
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[1, 0, 0, 1]
[0, 0, 0, 1]
[0, 0, 1, 0]
[1, 0, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 1]
[0, 0, 0, 0]
[0, 0, 0, 1]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 1, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 1, 1, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 1, 0]
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 0, 1]
[0, 0, 0, 1]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 1]
[0, 0, 1, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[0, 1, 0, 0]
[0, 0, 0, 0]
[0, 0, 0, 0]
[1, 0, 0, 0]
[0, 1, 0, 0]

In [21]:
%%time
from sklearn.cluster import MiniBatchKMeans
ca = MiniBatchKMeans(n_clusters=4)

from eden.motif import SequenceMotif
seqmot = SequenceMotif(training_size=100, clustering_algorithm=ca)
seqmot.fit(seqs)

for cluster_id in seqmot.motives_db:
    print cluster_id
    for count, motif in sorted(seqmot.motives_db[cluster_id], reverse=True):
        print motif, count


model induction: 100 positive instances 8 secs
motives extraction: 67 motives 3 secs
motives clustering: 4 clusters 0 secs
after filtering: 65 motives 4 clusters 0 secs
motif model construction: 0 secs
0
UCCCCCCCG 2
UCCCGCCGC 1
UCCCCCCACG 1
UCCCCCCAAC 1
UCCCCCACCC 1
UCCACCCCCA 1
GCCCCCCCCG 1
GCCCCCCCA 1
GCCCCCAACC 1
CCCCUCAAAC 1
CCCCCCCCU 1
CCCCCCCCCC 1
CCCCCCCACU 1
CAACCCCCC 1
ACCCCCCCA 1
ACACCCUCCC 1
AAACCCCACG 1
1
AAAAAAAAA 2
UAAACACCU 1
UAAAAAAAAU 1
GACAACAAC 1
GAAAAAAAA 1
CCAAAAAAAC 1
CACACAAAC 1
CAAAAAACAA 1
ACAAACAAAA 1
AACACGACUG 1
AACAAAAAAG 1
AAACACAAAC 1
AAACAAAAAC 1
AAACAAAAA 1
AAAACAAAAA 1
AAAAAUAUAA 1
AAAAAAAAAA 1
2
UUGUGUUC 1
UGUCUUUCUG 1
UGGGGGGUU 1
UGGGGGGGG 1
GUUCUUUGG 1
GUCUGUCUG 1
GGUUUCUUU 1
GGGUUUUUA 1
GGGUUCUUUA 1
GGGGUUUUU 1
GGGGGGGGU 1
GGGGGGGGGU 1
GGGGGGGGGG 1
CUGUUCUUU 1
CGGGUUGU 1
CGCUGGGGG 1
3
UUUUUUUUUG 1
UUUUGUUUA 1
UUUGUUUUUG 1
UUUGUCACUA 1
UGGUUUUUC 1
GUUUUUUUUU 1
GUUUUUUUUC 1
GUUUUUUUC 1
GUUUUUUGUC 1
CUUUUUUUG 1
CUUUUUUUA 1
CUUUUGUUG 1
CUUUUCCCC 1
CUUUCUUUUA 1
AUUUUUUUG 1
CPU times: user 4.18 s, sys: 1.46 s, total: 5.64 s
Wall time: 12.7 s

In [23]:
%%time
from sklearn.cluster import DBSCAN
ca = DBSCAN(eps=0.2, min_samples=3)

from eden.motif import SequenceMotif
seqmot = SequenceMotif(training_size=100, clustering_algorithm=ca)
seqmot.fit(seqs)

for cluster_id in seqmot.motives_db:
    print cluster_id
    for count, motif in sorted(seqmot.motives_db[cluster_id], reverse=True):
        print motif, count


model induction: 100 positive instances 8 secs
motives extraction: 69 motives 3 secs
motives clustering: 6 clusters 0 secs
after filtering: 33 motives 5 clusters 0 secs
motif model construction: 0 secs
0
UCCCCCCCG 2
UCCCCCCACG 1
UCCCCCCAAC 1
UCCCCCACCC 1
GCCCCCCCA 1
CCCCCCCCU 1
CCCCCCCCG 1
CCCCCCCCCC 1
CCCCCCCACU 1
1
UGGGGGGGU 1
UGGGGGGGG 1
GGGGGGGGU 1
GGGGGGGGGU 1
GGGGGGGGGG 1
AGGGGGGUGG 1
AGGGGGGUA 1
2
UUUUUUUUUG 1
UUUUUUUGU 1
GUUUUUUUUU 1
GUUUUUUUUC 1
GUUUUUUUC 1
GUUUUUUGUC 1
CUUUUUUUGU 1
3
AAAACAAAAA 2
CCAAAAAAAC 1
CAAAAAACAA 1
AACAAAAAAG 1
AAACAAAAAC 1
4
UAAAAAAAA 1
GAAAAAAAAA 1
GAAAAAAAA 1
AAAAAAAAAA 1
AAAAAAAAA 1
CPU times: user 3.67 s, sys: 1.59 s, total: 5.27 s
Wall time: 11.9 s