In [3]:
# Remove any partial downloads from a previous attempt, then fetch the
# paired-end FASTQ files for 1000 Genomes sample NA18489 (run SRR003265)
# from the EBI FTP mirror. Each file is ~0.5 GB.
!rm -f SRR003265_1.filt.fastq.gz 2>/dev/null
!rm -f SRR003265_2.filt.fastq.gz 2>/dev/null
!wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265_1.filt.fastq.gz
!wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265_2.filt.fastq.gz


--2015-06-26 14:53:45--  ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265_1.filt.fastq.gz
           => ‘SRR003265_1.filt.fastq.gz’
Resolving ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)... 193.62.192.8
Connecting to ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)|193.62.192.8|:21... connected.
Logging in as anonymous ... Logged in!
==> SYST ... done.    ==> PWD ... done.
==> TYPE I ... done.  ==> CWD (1) /vol1/ftp/phase3/data/NA18489/sequence_read ... done.
==> SIZE SRR003265_1.filt.fastq.gz ... 502660639
==> PASV ... done.    ==> RETR SRR003265_1.filt.fastq.gz ... done.
Length: 502660639 (479M) (unauthoritative)

SRR003265_1.filt.fa 100%[=====================>] 479.37M  8.72MB/s   in 68s    

2015-06-26 14:54:53 (7.07 MB/s) - ‘SRR003265_1.filt.fastq.gz’ saved [502660639]

--2015-06-26 14:54:53--  ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/phase3/data/NA18489/sequence_read/SRR003265_2.filt.fastq.gz
           => ‘SRR003265_2.filt.fastq.gz’
Resolving ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)... 193.62.192.8
Connecting to ftp.1000genomes.ebi.ac.uk (ftp.1000genomes.ebi.ac.uk)|193.62.192.8|:21... connected.
Logging in as anonymous ... Logged in!
==> SYST ... done.    ==> PWD ... done.
==> TYPE I ... done.  ==> CWD (1) /vol1/ftp/phase3/data/NA18489/sequence_read ... done.
==> SIZE SRR003265_2.filt.fastq.gz ... 484084218
==> PASV ... done.    ==> RETR SRR003265_2.filt.fastq.gz ... done.
Length: 484084218 (462M) (unauthoritative)

SRR003265_2.filt.fa 100%[=====================>] 461.66M  5.02MB/s   in 91s    

2015-06-26 14:56:24 (5.08 MB/s) - ‘SRR003265_2.filt.fastq.gz’ saved [484084218]


In [1]:
from __future__ import division, print_function

In [2]:
import gzip
from Bio import SeqIO

In [3]:
import sys
# Pick a lazy pairwise iterator: Python 2's built-in zip builds a full
# list, so use itertools.izip there; Python 3's zip is already lazy.
if sys.version_info[0] == 2:
    import itertools
    my_zip = itertools.izip
else:
    my_zip = zip
# Count read pairs by streaming both FASTQ files in lockstep.
# FIX: the original left both gzip handles open; context managers
# guarantee they are closed even if parsing raises.
with gzip.open('SRR003265_1.filt.fastq.gz', 'rt') as f1, \
     gzip.open('SRR003265_2.filt.fastq.gz', 'rt') as f2:
    recs1 = SeqIO.parse(f1, 'fastq')
    recs2 = SeqIO.parse(f2, 'fastq')
    cnt = 0
    for rec1, rec2 in my_zip(recs1, recs2):
        cnt += 1
print('Number of pairs: %d' % cnt)


Number of pairs: 9170808

In [4]:
# NOTE(review): this cell is Python 2 only — `xrange` was removed in
# Python 3, where `range` itself returns a lazy sequence object.
# Eager list vs. lazy xrange object:
print(type(range(100000)))
print(type(xrange(100000)))
# range pays to materialize 100000 ints up front; xrange only builds a
# small wrapper object, hence the microsecond-vs-nanosecond gap below.
%timeit range(100000)
%timeit xrange(100000)
# Indexing an xrange computes the requested element on demand.
%timeit xrange(100000)[5000]


<type 'list'>
<type 'xrange'>
1000 loops, best of 3: 828 µs per loop
The slowest run took 13.50 times longer than the fastest. This could mean that an intermediate result is being cached 
10000000 loops, best of 3: 141 ns per loop
The slowest run took 10.96 times longer than the fastest. This could mean that an intermediate result is being cached 
10000000 loops, best of 3: 174 ns per loop

In [5]:
# NOTE(review): Python 2 only (`xrange` does not exist in Python 3).
# Indexing cost comparison: range must build the whole list before any
# access, so every position costs roughly the same large amount; xrange
# computes the element for any index (first, middle, last) on demand.
%timeit range(100000)[10]
%timeit xrange(100000)[10]
%timeit range(100000)[50000]
%timeit xrange(100000)[50000]
%timeit range(100000)[-1]
%timeit xrange(100000)[-1]


1000 loops, best of 3: 828 µs per loop
The slowest run took 23.51 times longer than the fastest. This could mean that an intermediate result is being cached 
1000000 loops, best of 3: 172 ns per loop
1000 loops, best of 3: 831 µs per loop
The slowest run took 10.88 times longer than the fastest. This could mean that an intermediate result is being cached 
10000000 loops, best of 3: 175 ns per loop
1000 loops, best of 3: 829 µs per loop
The slowest run took 9.39 times longer than the fastest. This could mean that an intermediate result is being cached 
1000000 loops, best of 3: 203 ns per loop

In [6]:
# Show what range/zip/filter/map return: on Python 2 each produces a
# list; on Python 3 each is a lazy object consumed on demand.
for produced in (range(10),
                 zip([10]),
                 filter(lambda x: x > 10, [10, 11]),
                 map(lambda x: x + 1, [10])):
    print(type(produced))


<type 'list'>
<type 'list'>
<type 'list'>
<type 'list'>

In [7]:
def is_big(value):
    """Return True when value is strictly greater than 10."""
    return value > 10

# On Python 3, filter() is lazy; list() materializes it so the result
# can be indexed.
big_10 = filter(is_big, [9, 10, 11, 23])
big_10_list = list(big_10)
print(big_10_list[1])


23

In [8]:
import numpy as np
from Bio import SeqUtils
# NOTE(review): Bio.Alphabet was removed in Biopython 1.78+; this cell
# needs an older Biopython release to run — confirm the pinned version.
from Bio.Alphabet import IUPAC

# Average the GC skew of the first 1001 reads whose median phred
# quality is at least 40.
# FIX: the original never closed the gzip handle; a context manager
# closes it deterministically.
with gzip.open('SRR003265_2.filt.fastq.gz', 'rt') as f:
    recs = SeqIO.parse(f, 'fastq', alphabet=IUPAC.unambiguous_dna)
    i = 0
    sum_skews = 0
    for rec in recs:
        # Low-quality reads are skipped and do not count toward the 1001.
        if np.median(rec.letter_annotations['phred_quality']) < 40:
            continue
        skew = SeqUtils.GC_skew(rec.seq)[0]
        sum_skews += skew
        # The break is checked before the increment, so skews for
        # i = 0..1000 (1001 reads) are accumulated — matching the
        # i + 1 divisor below.
        if i == 1000:
            break
        i += 1
print(sum_skews / (i + 1))


-0.0174188277631

In [9]:
def get_gcs(recs):
    """Yield the leading GC-skew value for each sequence record in recs."""
    return (SeqUtils.GC_skew(rec.seq)[0] for rec in recs)

def filter_quality(recs):
    """Pass through only records whose median phred quality is >= 40."""
    return (rec for rec in recs
            if np.median(rec.letter_annotations['phred_quality']) >= 40)
# Same computation as the previous cell, restructured as a generator
# pipeline: quality filter -> GC-skew extraction.
# FIX: the original never closed the gzip handle; the context manager
# closes it deterministically once the loop finishes.
with gzip.open('SRR003265_2.filt.fastq.gz', 'rt') as f:
    recs = SeqIO.parse(f, 'fastq', alphabet=IUPAC.unambiguous_dna)
    sum_skews = 0
    for i, skew in enumerate(get_gcs(filter_quality(recs))):
        sum_skews += skew
        # Stop after 1001 skews (i = 0..1000), matching i + 1 below.
        if i == 1000:
            break
print(sum_skews / (i + 1))


-0.0174188277631

In [ ]: