#!/usr/bin/env python
# encoding: utf-8
"""
extract_uce_bypass.py
Author: Brant Faircloth, Graham Derryberry
Created by Brant Faircloth on 02 June 2011.
Restructured by Graham Derryberry on 20 March 2013.
Copyright 2011 Brant C. Faircloth. All rights reserved.
Description: Given a fasta file of contigs and a probe file,
output matches of contigs to UCEs
"""
import re
import os
import sys
import glob
import copy
import sqlite3
import argparse
from phyluce import lastz
from phyluce.helpers import is_dir, is_file, FullPaths
from collections import defaultdict
from seqtools.sequence import fasta
from Bio import SeqIO
import pdb
def get_args():
    """Build the CLI parser, parse sys.argv, and cross-validate options."""
    parser = argparse.ArgumentParser(
        description='Match UCE probes to assembled contigs and output the fasta file')
    parser.add_argument('contigs', action=FullPaths, type=is_file,
                        help="The contigs file to process")
    parser.add_argument('query', action=FullPaths, type=is_file,
                        help="The query fasta or 2bit file")
    parser.add_argument("output", type=str,
                        help="""The name of the output uce fasta file""")
    parser.add_argument("align", type=str,
                        help="""The name of the output alignment file""")
    parser.add_argument('--coverage', default=80, type=int)
    parser.add_argument('--identity', default=80, type=int)
    parser.add_argument('--dupefile', action=FullPaths, type=is_file,
                        help="Path to self-to-self lastz results")
    parser.add_argument("--regex", type=str, default=None,
                        help="""A regular expression to apply to the probe sequences for replacement""")
    parser.add_argument("--repl", type=str, default=None,
                        help="""The replacement text for matches to the regular expression in --regex""")
    args = parser.parse_args()
    # --regex and --repl only make sense as a pair; bail out if one is missing
    if args.regex is not None and args.repl is None:
        sys.exit("If you are replacing text with a regular expression you must pass args.repl value")
    elif args.repl is not None and args.regex is None:
        sys.exit("If you are replacing text with a regular expression you must pass args.regex value")
    return args
def create_probe_database(uces):
    """Create the in-memory UCE-match database.

    Parameters
    ----------
    uces : iterable of str
        UCE locus names; each becomes a primary-key row in both tables.

    Returns
    -------
    tuple
        (conn, c): the open sqlite3 connection and a cursor on it.
    """
    conn = sqlite3.connect(":memory:")
    c = conn.cursor()
    c.execute("PRAGMA foreign_keys = ON")
    # A fresh :memory: database can never already contain these tables, so
    # the old interactive "table exists" recovery branch (which referenced
    # undefined names, used an always-true `answer == "Y" or "YES"` test,
    # and left a pdb.set_trace() behind) was dead code and is removed.
    c.execute("CREATE TABLE matches (uce text primary key, contigs text)")
    c.execute("CREATE TABLE match_map (uce text primary key, contigs text)")
    for uce in uces:
        c.execute("INSERT INTO matches(uce) values (?)", (uce,))
        c.execute("INSERT INTO match_map(uce) values (?)", (uce,))
    return conn, c
def store_lastz_results_in_db(c, matches, orientation, critter):
"""enter matched loci in database"""
for key, match in matches.iteritems():
# We should have dropped all duplicates at this point
assert len(match) == 1, "More than one match"
item = list(match)[0]
insert_string = "UPDATE matches SET {0} = 1 WHERE uce = '{1}'".format(critter, item)
c.execute(insert_string)
#pdb.set_trace()
orient_key = "{0}({1})".format(key, list(orientation[item])[0])
insert_string = "UPDATE match_map SET {0} = '{1}' WHERE uce = '{2}'".format(critter, orient_key, item)
c.execute(insert_string)
def get_name(header, splitchar="_", items=2, regex=None, repl=None):
    """Parse a locus name from a fasta header.

    Keeps the first *items* fields of *header* split on *splitchar*,
    joins them with '_', strips any leading '>', and lowercases the
    result.  When both *regex* and *repl* are given, applies the
    substitution to the normalized name before returning it.
    """
    pieces = header.split(splitchar)[:items]
    name = "_".join(pieces).lstrip('>').strip().lower()
    if regex is None or repl is None:
        return name
    return re.sub(regex, repl, name)
def get_dupes(lastz_file, regex=None, repl=None):
    """Given a lastz file of probes aligned to themselves, get duplicates.

    Returns the set of locus names that hit anything other than
    themselves; when *regex* is given, names are regex-rewritten with
    *repl* and lowercased before being returned.
    """
    matches = defaultdict(list)
    for aln in lastz.Reader(lastz_file):
        target = get_name(aln.name1, "|", 1)
        query = get_name(aln.name2, "|", 1)
        matches[target].append(query)
    dupes = set()
    # see if one probe matches any other probes
    # other than the children of the locus
    for target, hits in matches.items():
        if len(hits) > 1:
            # multiple hits: flag every non-self pairing
            for hit in hits:
                if hit != target:
                    dupes.add(target)
                    dupes.add(hit)
        elif target != hits[0]:
            # single hit that is not the probe itself is also suspect
            dupes.add(target)
    if regex:
        return set(re.sub(regex, repl, d).lower() for d in dupes)
    return dupes
def contig_count(contig):
    """Return a count of contigs ('>' header lines) in a fasta file.

    Streams the file line by line instead of loading it all with
    readlines(), and closes the handle deterministically (the old code
    leaked it).  Mode 'rU' is gone too: it is an error on Python >= 3.11,
    and default text mode reads ordinary unix/dos fasta identically.
    """
    with open(contig) as infile:
        return sum(1 for line in infile if line.startswith('>'))
def get_organism_names_from_fasta_files(ff):
    """Derive taxon names from fasta file names.

    The name is the basename up to the first '.', with '-' replaced
    by '_'.
    """
    names = []
    for path in ff:
        base = os.path.basename(path).split('.')[0]
        names.append(base.replace('-', "_"))
    return names
def check_contigs_for_dupes(matches):
    """Return the set of contigs that match more than one UCE locus."""
    # A contig is dubious when its (deduplicated) UCE hits number > 1.
    return set(node for node in matches if len(set(matches[node])) > 1)
def check_probes_for_dupes(revmatches):
    """Return all contigs belonging to UCE probes that hit > 1 contig."""
    dupe_set = set()
    for nodes in revmatches.values():
        if len(nodes) > 1:
            # every contig tied to a multi-hit probe is suspect
            dupe_set.update(nodes)
    return dupe_set
def pretty_print_output(critter, matches, contigs, pd, mc, mp):
    """Write a one-line match summary for *critter* to stdout.

    Parameters
    ----------
    critter : str
        Taxon name.
    matches : dict
        Unique contig -> UCE matches (one entry per unique match).
    contigs : int
        Total number of contigs in the assembly.
    pd : set
        Probes dropped as known duplicates.
    mc : set
        Contigs matching multiple UCE probes.
    mp : set
        UCE probes matching multiple contigs.
    """
    unique_matches = len(matches)  # the old sum([1 for ...]) was just len()
    # Guard against an empty assembly; the old code divided by zero here.
    pct = (float(unique_matches) / contigs * 100) if contigs else 0.0
    out = "\t {0}: {1} ({2:.2f}%) uniques of {3} contigs, {4} dupe probe matches, " + \
        "{5} UCE probes matching multiple contigs, {6} contigs matching multiple UCE probes"
    # Single-argument print(...) behaves identically on Python 2 and 3.
    print(out.format(
        critter,
        unique_matches,
        pct,
        contigs,
        len(pd),
        len(mp),
        len(mc)
    ))
def main():
    """Run the UCE-vs-contig matching pipeline.

    Steps: read probe (query) names, optionally drop probes known to be
    duplicates, lastz-align the probes to the contigs, keep only
    one-to-one contig/UCE matches, record them in an in-memory sqlite
    database, and write the matching contigs (renamed with their UCE
    locus) to args.output as fasta.

    NOTE(review): the scrape lost this file's indentation; block nesting
    below is reconstructed from Python semantics and upstream phyluce
    convention — confirm against the original file.
    """
    args = get_args()
    # get_args() guarantees --regex/--repl travel together, so this mixed
    # truthiness/None test behaves like "both options supplied".
    if args.regex and args.repl is not None:
        # "s_[0-9]+$"
        regex = re.compile(args.regex)
        # UCE names parsed from query fasta headers, normalized
        # (lowercased, optionally regex-rewritten) by get_name().
        uces = set([get_name(read.identifier, "|", 1, regex=regex, repl=args.repl)
                    for read in fasta.FastaReader(args.query)])
    else:
        uces = set([get_name(read.identifier, "|", 1)
                    for read in fasta.FastaReader(args.query)])
        regex = None
    if args.dupefile:
        # `dupes` is only bound when --dupefile is given; every later use
        # is guarded by `args.dupefile`, so no NameError can occur.
        print "\t Getting dupes"
        dupes = get_dupes(args.dupefile, regex, args.repl)
    contig = args.contigs#glob.glob(os.path.join(args.contigs, '*.fa*'))
    organisms = ["contigs"]#get_organism_names_from_fasta_files(fasta_files)
    conn, c = create_probe_database( uces )
    print "Processing:"
    #for contig in fasta_files:
    # taxon name derived from the contig file name
    critter = os.path.basename(contig).split('.')[0].replace('-', "_")
    #output = args.align
    # os.path.join(
    # args.align, \
    # os.path.splitext(os.path.basename(contig))[0] + '.lastz'
    # )
    contigs = contig_count(contig)
    # align the probes to the contigs; lastz writes its results to args.align
    alignment = lastz.Align(
        contig,
        args.query,
        args.coverage,
        args.identity,
        args.align
    )
    lzstdout, lztstderr = alignment.run()
    # parse the lastz results of the alignment
    matches, orientation, revmatches = \
        defaultdict(set), defaultdict(set), defaultdict(set)
    probe_dupes = set()
    if not lztstderr:
        for lz in lastz.Reader(args.align ):
            # get strandedness of match
            contig_name = get_name(lz.name1)
            uce_name = get_name(lz.name2, "|", 1, regex=regex, repl=args.repl)
            if args.dupefile and uce_name in dupes:
                # known duplicate probe: count it, but don't match it
                probe_dupes.add(uce_name)
            else:
                matches[contig_name].add(uce_name)
                orientation[uce_name].add(lz.strand2)
                revmatches[uce_name].add(contig_name)
    # we need to check nodes for dupe matches to the same probes
    contigs_matching_mult_uces = check_contigs_for_dupes(matches)
    uces_matching_mult_contigs = check_probes_for_dupes(revmatches)
    nodes_to_drop = contigs_matching_mult_uces.union(uces_matching_mult_contigs)
    # remove dupe and/or dubious nodes/contigs
    match_copy = copy.deepcopy(matches)
    for k in match_copy.keys():
        if k in nodes_to_drop:
            del matches[k]
    store_lastz_results_in_db(c, matches, orientation, critter)
    conn.commit()
    pretty_print_output(
        critter,
        matches,
        contigs,
        probe_dupes,
        contigs_matching_mult_uces,
        uces_matching_mult_contigs
    )
    # get all the UCE records from the db
    query = "SELECT uce, {0} FROM match_map WHERE {0} IS NOT NULL".format("contigs")
    c.execute(query)
    # match_map values look like "node_name(strand)"; strip the strand to
    # map node name -> uce name
    data = {row[1].split("(")[0]:row[0] for row in c.fetchall()}
    nodenames = set(data.keys())
    # make sure we don't lose any dupes
    assert len(data) == len(nodenames), "There were duplicate contigs."
    outp = open(args.output, 'w')
    print "Building UCE fasta:"
    #for contig in fasta_files:
    for record in SeqIO.parse(open(contig), 'fasta'):
        # presumably contig ids look like "node_123_...": the first two
        # '_' fields are the node name keyed in match_map — TODO confirm
        name = '_'.join(record.id.split('_')[:2])
        if name.lower() in nodenames:
            # prefix the contig id with its UCE locus name on output
            record.id = "{0}|{1}".format(data[name.lower()], record.id)
            outp.write(record.format('fasta'))
    outp.close()
# Standard script entry-point guard: run the pipeline only when executed
# directly, not when imported.
if __name__ == '__main__':
    main()