In [6]:
from collections import defaultdict
import warnings
import logging
import gffutils
import pybedtools
import pandas as pd
import copy
import re
from gffutils.pybedtools_integration import tsses
logging.basicConfig(level=logging.INFO)
In [7]:
gtf = '/home/cmb-panasas2/skchoudh/genomes/S_cerevisiae/annotation/Saccharomyces_cerevisiae.R64-1-1.90.gtf'
gtf_db = '/home/cmb-panasas2/skchoudh/genomes/S_cerevisiae/annotation/Saccharomyces_cerevisiae.R64-1-1.90.gtf.db'
prefix = '/home/cmb-panasas2/skchoudh/genomes/S_cerevisiae/annotation/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.gffutils'
chrsizes = '/home/cmb-panasas2/skchoudh/genomes/S_cerevisiae/fasta/Saccharomyces_cerevisiae.R64-1-1.dna.toplevel.sizes'
In [8]:
db = gffutils.create_db(gtf, dbfn=gtf_db, disable_infer_genes=True, disable_infer_transcripts=True, merge_strategy='merge', force=True)
def create_gene_dict(db):
    '''
    Store each feature from db.all_features() in a dict of dicts,
    keyed by gene_id, then transcript_id, then feature type.
    '''
    gene_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for line_no, feature in enumerate(db.all_features()):
        gene_ids = feature.attributes['gene_id']
        feature_type = feature.featuretype
        if feature_type == 'gene':
            if len(gene_ids) != 1:
                logging.warning('Found multiple gene_ids on line {} in gtf'.format(line_no))
                break
            else:
                gene_id = gene_ids[0]
                gene_dict[gene_id]['gene'] = feature
        else:
            transcript_ids = feature.attributes['transcript_id']
            for gene_id in gene_ids:
                for transcript_id in transcript_ids:
                    gene_dict[gene_id][transcript_id][feature_type].append(feature)
    return gene_dict
In [9]:
db = gffutils.FeatureDB(gtf_db, keep_order=True)
gene_dict = create_gene_dict(db)
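For orientation, the nested structure can be inspected for a single gene. This is only an illustrative peek: YAL001C is assumed to be present in this yeast annotation, and the fallback simply picks an arbitrary key if it is not.
# Peek at the nested structure for one gene (illustrative only).
example_gene = 'YAL001C' if 'YAL001C' in gene_dict else next(iter(gene_dict))
print(gene_dict[example_gene].get('gene'))
for transcript_id, features in gene_dict[example_gene].items():
    if transcript_id == 'gene':
        continue
    for feature_type, feature_list in features.items():
        print(transcript_id, feature_type, len(feature_list))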
In [10]:
for x in db.featuretypes():
    print(x)
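If a per-type tally is more useful than the bare list of types, the FeatureDB can also count records directly. This is an optional check, not part of the BED export.
# Optional: tally how many records of each feature type the database holds.
for ftype in db.featuretypes():
    print(ftype, db.count_features_of_type(ftype))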
In [11]:
def get_gene_list(gene_dict):
    return list(set(gene_dict.keys()))

def get_UTR_regions(gene_dict, gene_id, transcript, cds):
    '''Split the annotated UTRs of a transcript into 5' and 3' UTRs,
    based on their position relative to the first and last CDS.
    '''
    if len(cds) == 0:
        return [], []
    utr5_regions = []
    utr3_regions = []
    utrs = gene_dict[gene_id][transcript]['UTR']
    first_cds = cds[0]
    last_cds = cds[-1]
    for utr in utrs:
        ## Push all UTRs at once
        ## Sort later to remove duplicates
        strand = utr.strand
        if strand == '+':
            if utr.stop < first_cds.start:
                utr.featuretype = 'five_prime_UTR'
                utr5_regions.append(utr)
            elif utr.start > last_cds.stop:
                utr.featuretype = 'three_prime_UTR'
                utr3_regions.append(utr)
            else:
                raise RuntimeError('UTR overlaps the CDS for transcript {}'.format(transcript))
        elif strand == '-':
            if utr.stop < first_cds.start:
                utr.featuretype = 'three_prime_UTR'
                utr3_regions.append(utr)
            elif utr.start > last_cds.stop:
                utr.featuretype = 'five_prime_UTR'
                utr5_regions.append(utr)
            else:
                raise RuntimeError('UTR overlaps the CDS for transcript {}'.format(transcript))
    return utr5_regions, utr3_regions

def create_bed(regions, bedtype='0'):
    '''Create a BED string from a list of regions
    bedtype: '0' or '1'
        whether the output BED coordinates are 0-based or 1-based
    '''
    bedstr = ''
    for region in regions:
        assert len(region.attributes['gene_id']) == 1
        ## GTF start is 1-based, so shift by one while writing
        ## to 0-based BED format
        if bedtype == '0':
            start = region.start - 1
        else:
            start = region.start
        bedstr += '{}\t{}\t{}\t{}\t{}\t{}\n'.format(region.chrom,
                                                    start,
                                                    region.stop,
                                                    re.sub(r'\.\d+', '', region.attributes['gene_id'][0]),
                                                    '.',
                                                    region.strand)
    return bedstr

def rename_regions(regions, gene_id):
    regions = list(regions)
    if len(regions) == 0:
        return []
    for region in regions:
        region.attributes['gene_id'] = gene_id
    return regions

def merge_regions(db, regions):
    if len(regions) == 0:
        return []
    merged = db.merge(sorted(list(regions), key=lambda x: x.start))
    return merged

def merge_regions_nostrand(db, regions):
    if len(regions) == 0:
        return []
    merged = db.merge(sorted(list(regions), key=lambda x: x.start), ignore_strand=True)
    return merged
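To make the 1-based to 0-based shift in create_bed concrete, here is a minimal sketch using a single hand-written GTF line; the chromosome name, coordinates and gene_id are made up purely for illustration.
# A toy GTF record: 1-based, inclusive coordinates 100-200.
toy_line = ('I\ttoy\texon\t100\t200\t.\t+\t.\t'
            'gene_id "TOYGENE"; transcript_id "TOYGENE.1";')
toy_feature = gffutils.feature_from_line(toy_line)
# Expect a BED line starting at 99 (0-based) and ending at 200 (exclusive).
print(create_bed([toy_feature]))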
In [13]:
utr5_bed = ''
utr3_bed = ''
gene_bed = ''
exon_bed = ''
intron_bed = ''
start_codon_bed = ''
stop_codon_bed = ''
cds_bed = ''
gene_list = []
for gene_id in get_gene_list(gene_dict):
    gene_list.append(gene_dict[gene_id]['gene'])
    utr5_regions, utr3_regions = [], []
    exon_regions, intron_regions = [], []
    start_codon_regions, stop_codon_regions = [], []
    cds_regions = []
    for feature in gene_dict[gene_id].keys():
        if feature == 'gene':
            continue
        cds = list(gene_dict[gene_id][feature]['CDS'])
        exons = list(gene_dict[gene_id][feature]['exon'])
        merged_exons = merge_regions(db, exons)
        introns = db.interfeatures(merged_exons)
        utr5_region, utr3_region = get_UTR_regions(gene_dict, gene_id, feature, cds)
        utr5_regions += utr5_region
        utr3_regions += utr3_region
        exon_regions += exons
        intron_regions += introns
        cds_regions += cds
    merged_utr5 = merge_regions(db, utr5_regions)
    renamed_utr5 = rename_regions(merged_utr5, gene_id)
    merged_utr3 = merge_regions(db, utr3_regions)
    renamed_utr3 = rename_regions(merged_utr3, gene_id)
    merged_exons = merge_regions(db, exon_regions)
    renamed_exons = rename_regions(merged_exons, gene_id)
    merged_introns = merge_regions(db, intron_regions)
    renamed_introns = rename_regions(merged_introns, gene_id)
    merged_cds = merge_regions(db, cds_regions)
    renamed_cds = rename_regions(merged_cds, gene_id)
    utr3_bed += create_bed(renamed_utr3)
    utr5_bed += create_bed(renamed_utr5)
    exon_bed += create_bed(renamed_exons)
    intron_bed += create_bed(renamed_introns)
    cds_bed += create_bed(renamed_cds)
gene_bed = create_bed(gene_list)
gene_bedtool = pybedtools.BedTool(gene_bed, from_string=True)
utr5_bedtool = pybedtools.BedTool(utr5_bed, from_string=True)
utr3_bedtool = pybedtools.BedTool(utr3_bed, from_string=True)
exon_bedtool = pybedtools.BedTool(exon_bed, from_string=True)
intron_bedtool = pybedtools.BedTool(intron_bed, from_string=True)
cds_bedtool = pybedtools.BedTool(cds_bed, from_string=True)
gene_bedtool.remove_invalid().sort().saveas('{}.genes.bed'.format(prefix))
utr5_bedtool.remove_invalid().sort().saveas('{}.UTR5.bed'.format(prefix))
utr3_bedtool.remove_invalid().sort().saveas('{}.UTR3.bed'.format(prefix))
exon_bedtool.remove_invalid().sort().saveas('{}.exon.bed'.format(prefix))
intron_bedtool.remove_invalid().sort().saveas('{}.intron.bed'.format(prefix))
cds_bedtool.remove_invalid().sort().saveas('{}.cds.bed'.format(prefix))
Out[13]:
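As a quick sanity check (optional, and assuming the saveas calls above wrote the files where prefix points), the interval counts per annotation can be compared.
# Optional sanity check: how many intervals ended up in each BED file.
for region in ['genes', 'UTR5', 'UTR3', 'exon', 'intron', 'cds']:
    bed = pybedtools.BedTool('{}.{}.bed'.format(prefix, region))
    print(region, bed.count())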
In [14]:
for gene_id in get_gene_list(gene_dict):
    start_codons = []
    stop_codons = []
    for start_codon in db.children(gene_id, featuretype='start_codon'):
        ## 1-based stop
        ## 0-based start handled while converting to bed
        start_codon.stop = start_codon.start
        start_codons.append(start_codon)
    for stop_codon in db.children(gene_id, featuretype='stop_codon'):
        stop_codon.start = stop_codon.stop
        stop_codon.stop = stop_codon.stop + 1
        stop_codons.append(stop_codon)
    merged_start_codons = merge_regions(db, start_codons)
    renamed_start_codons = rename_regions(merged_start_codons, gene_id)
    merged_stop_codons = merge_regions(db, stop_codons)
    renamed_stop_codons = rename_regions(merged_stop_codons, gene_id)
    start_codon_bed += create_bed(renamed_start_codons)
    stop_codon_bed += create_bed(renamed_stop_codons)
start_codon_bedtool = pybedtools.BedTool(start_codon_bed, from_string=True)
stop_codon_bedtool = pybedtools.BedTool(stop_codon_bed, from_string=True)
start_codon_bedtool.remove_invalid().sort().saveas('{}.start_codon.bed'.format(prefix))
stop_codon_bedtool.remove_invalid().sort().saveas('{}.stop_codon.bed'.format(prefix))
Out[14]:
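Given the coordinate shifts applied above, the start codon intervals should come out 1 bp wide and the stop codon intervals 2 bp wide, unless merging joined codons from overlapping transcripts. A minimal check with pandas (already imported as pd) via BedTool.to_dataframe could look like this; it is a sketch assuming the two files above were written.
# Optional: inspect the widths of the exported codon intervals.
for name in ['start_codon', 'stop_codon']:
    df = pybedtools.BedTool('{}.{}.bed'.format(prefix, name)).to_dataframe()
    print(name, (df['end'] - df['start']).value_counts().to_dict())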
In [15]:
## TSS
polyA_sites_bed = ''
tss_sites_bed = ''
for gene_id in get_gene_list(gene_dict):
    tss_sites = []
    polyA_sites = []
    for transcript in db.children(gene_id, featuretype='transcript'):
        start_t = copy.deepcopy(transcript)
        stop_t = copy.deepcopy(transcript)
        ## start_t marks the transcript start, stop_t the transcript end
        start_t.stop = start_t.start + 1
        stop_t.start = stop_t.stop
        ## On the minus strand the TSS is at the transcript end and the
        ## polyA site at the transcript start, so swap the two
        if transcript.strand == '-':
            start_t, stop_t = stop_t, start_t
        tss_sites.append(start_t)
        polyA_sites.append(stop_t)
    merged_polyA_sites = merge_regions(db, polyA_sites)
    renamed_polyA_sites = rename_regions(merged_polyA_sites, gene_id)
    merged_tss_sites = merge_regions(db, tss_sites)
    renamed_tss_sites = rename_regions(merged_tss_sites, gene_id)
    polyA_sites_bed += create_bed(renamed_polyA_sites)
    tss_sites_bed += create_bed(renamed_tss_sites)
polyA_sites_bedtool = pybedtools.BedTool(polyA_sites_bed, from_string=True)
tss_sites_bedtool = pybedtools.BedTool(tss_sites_bed, from_string=True)
polyA_sites_bedtool.remove_invalid().sort().saveas('{}.polyA_sites.bed'.format(prefix))
tss_sites_bedtool.remove_invalid().sort().saveas('{}.tss_sites.bed'.format(prefix))
Out[15]:
In [16]:
tss = tsses(db, as_bed6=True, merge_overlapping=True)
tss.remove_invalid().sort().saveas('{}.tss_temp.bed'.format(prefix))
promoter = tss.slop(l=1000, r=1000, s=True, g=chrsizes)
promoter.remove_invalid().sort().saveas('{}.promoter.1000.bed'.format(prefix))
Out[16]:
In [17]:
for l in [1000, 2000, 3000, 4000, 5000]:
    promoter = tss.slop(l=l, r=l, s=True, g=chrsizes)
    promoter.remove_invalid().sort().saveas('{}.promoter.{}.bed'.format(prefix, l))
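Since slop clips intervals at chromosome ends (that is what the g=chrsizes argument is for), promoters near the ends of a chromosome may be shorter than 2*l plus the TSS width. A rough check, assuming the promoter BED files above were written:
# Optional: report the minimum and maximum promoter width per flank size.
for l in [1000, 2000, 3000, 4000, 5000]:
    bed = pybedtools.BedTool('{}.promoter.{}.bed'.format(prefix, l))
    widths = [len(interval) for interval in bed]
    print(l, min(widths), max(widths))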