repo stringlengths 7-55 | path stringlengths 4-127 | func_name stringlengths 1-88 | original_string stringlengths 75-19.8k | language stringclasses 1 value | code stringlengths 75-19.8k | code_tokens sequence | docstring stringlengths 3-17.3k | docstring_tokens sequence | sha stringlengths 40 | url stringlengths 87-242 | partition stringclasses 1 value |
---|---|---|---|---|---|---|---|---|---|---|---|
jay-johnson/network-pipeline | network_pipeline/scripts/icmp_send_msg.py | dump_stats | def dump_stats(myStats):
"""
Show stats when pings are done
"""
print("\n----%s PYTHON PING Statistics----" % (myStats.thisIP))
if myStats.pktsSent > 0:
myStats.fracLoss = (myStats.pktsSent - myStats.pktsRcvd) \
/ myStats.pktsSent
print(("%d packets transmitted, %d packets received, "
"%0.1f%% packet loss") % (
myStats.pktsSent,
myStats.pktsRcvd,
100.0 * myStats.fracLoss
))
if myStats.pktsRcvd > 0:
print("round-trip (ms) min/avg/max = %d/%0.1f/%d" % (
myStats.minTime,
myStats.totTime / myStats.pktsRcvd,
myStats.maxTime
))
print("")
return | python | def dump_stats(myStats):
"""
Show stats when pings are done
"""
print("\n----%s PYTHON PING Statistics----" % (myStats.thisIP))
if myStats.pktsSent > 0:
myStats.fracLoss = (myStats.pktsSent - myStats.pktsRcvd) \
/ myStats.pktsSent
print(("%d packets transmitted, %d packets received, "
"%0.1f%% packet loss") % (
myStats.pktsSent,
myStats.pktsRcvd,
100.0 * myStats.fracLoss
))
if myStats.pktsRcvd > 0:
print("round-trip (ms) min/avg/max = %d/%0.1f/%d" % (
myStats.minTime,
myStats.totTime / myStats.pktsRcvd,
myStats.maxTime
))
print("")
return | [
"def",
"dump_stats",
"(",
"myStats",
")",
":",
"print",
"(",
"\"\\n----%s PYTHON PING Statistics----\"",
"%",
"(",
"myStats",
".",
"thisIP",
")",
")",
"if",
"myStats",
".",
"pktsSent",
">",
"0",
":",
"myStats",
".",
"fracLoss",
"=",
"(",
"myStats",
".",
"pktsSent",
"-",
"myStats",
".",
"pktsRcvd",
")",
"/",
"myStats",
".",
"pktsSent",
"print",
"(",
"(",
"\"%d packets transmitted, %d packets received, \"",
"\"%0.1f%% packet loss\"",
")",
"%",
"(",
"myStats",
".",
"pktsSent",
",",
"myStats",
".",
"pktsRcvd",
",",
"100.0",
"*",
"myStats",
".",
"fracLoss",
")",
")",
"if",
"myStats",
".",
"pktsRcvd",
">",
"0",
":",
"print",
"(",
"\"round-trip (ms) min/avg/max = %d/%0.1f/%d\"",
"%",
"(",
"myStats",
".",
"minTime",
",",
"myStats",
".",
"totTime",
"/",
"myStats",
".",
"pktsRcvd",
",",
"myStats",
".",
"maxTime",
")",
")",
"print",
"(",
"\"\"",
")",
"return"
] | Show stats when pings are done | [
"Show",
"stats",
"when",
"pings",
"are",
"done"
] | 4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa | https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/scripts/icmp_send_msg.py#L470-L495 | train |
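Example — a minimal sketch of calling dump_stats, assuming the module above imports cleanly and using types.SimpleNamespace as a stand-in for the stats object the real script builds (field names taken from the function body; the values are hypothetical):

from types import SimpleNamespace
from network_pipeline.scripts.icmp_send_msg import dump_stats

# hypothetical run: 4 pings sent, 3 received
stats = SimpleNamespace(thisIP='127.0.0.1', pktsSent=4, pktsRcvd=3,
                        minTime=10, maxTime=42, totTime=60, fracLoss=0.0)
dump_stats(stats)  # prints 25.0% packet loss and round-trip min/avg/max = 10/20.0/42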
mkouhei/bootstrap-py | bootstrap_py/update.py | Update.updatable | def updatable(self):
"""bootstrap-py package updatable?."""
if self.latest_version > self.current_version:
updatable_version = self.latest_version
else:
updatable_version = False
return updatable_version | python | def updatable(self):
"""bootstrap-py package updatable?."""
if self.latest_version > self.current_version:
updatable_version = self.latest_version
else:
updatable_version = False
return updatable_version | [
"def",
"updatable",
"(",
"self",
")",
":",
"if",
"self",
".",
"latest_version",
">",
"self",
".",
"current_version",
":",
"updatable_version",
"=",
"self",
".",
"latest_version",
"else",
":",
"updatable_version",
"=",
"False",
"return",
"updatable_version"
] | bootstrap-py package updatable?. | [
"bootstrap",
"-",
"py",
"package",
"updatable?",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L29-L35 | train |
mkouhei/bootstrap-py | bootstrap_py/update.py | Update.show_message | def show_message(self):
"""Show message updatable."""
print(
'current version: {current_version}\n'
'latest version : {latest_version}'.format(
current_version=self.current_version,
latest_version=self.latest_version)) | python | def show_message(self):
"""Show message updatable."""
print(
'current version: {current_version}\n'
'latest version : {latest_version}'.format(
current_version=self.current_version,
latest_version=self.latest_version)) | [
"def",
"show_message",
"(",
"self",
")",
":",
"print",
"(",
"'current version: {current_version}\\n'",
"'latest version : {latest_version}'",
".",
"format",
"(",
"current_version",
"=",
"self",
".",
"current_version",
",",
"latest_version",
"=",
"self",
".",
"latest_version",
")",
")"
] | Show message updatable. | [
"Show",
"message",
"updatable",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L37-L43 | train |
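Example — the two Update methods above can be exercised without the class's constructor (not shown in this excerpt) by passing a duck-typed stand-in; the version strings are hypothetical:

from types import SimpleNamespace
from bootstrap_py.update import Update

fake = SimpleNamespace(current_version='0.8.0', latest_version='1.0.0')
print(Update.updatable(fake))  # '1.0.0' ('0.8.0' < '1.0.0' as plain strings here)
Update.show_message(fake)      # prints the current and latest versions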
smdabdoub/phylotoast | bin/pick_otus_condense.py | condense_otus | def condense_otus(otuF, nuniqueF):
"""
Traverse the input otu-sequence file, collect the non-unique OTU IDs and
file the sequences associated with them under the unique OTU ID as defined
by the input matrix.
:@type otuF: file
:@param otuF: The output file from QIIME's pick_otus.py
:@type nuniqueF: file
:@param nuniqueF: The matrix of unique OTU IDs associated with the list of
non-unique OTU IDs they replaced.
:@rtype: dict
:@return: The new condensed table of unique OTU IDs and the sequence IDs
associated with them.
"""
uniqueOTUs = set()
nuOTUs = {}
# parse non-unique otu matrix
for line in nuniqueF:
line = line.split()
uOTU = line[0]
for nuOTU in line[1:]:
nuOTUs[nuOTU] = uOTU
uniqueOTUs.add(uOTU)
otuFilter = defaultdict(list)
# parse otu sequence file
for line in otuF:
line = line.split()
otuID, seqIDs = line[0], line[1:]
if otuID in uniqueOTUs:
otuFilter[otuID].extend(seqIDs)
elif otuID in nuOTUs:
otuFilter[nuOTUs[otuID]].extend(seqIDs)
return otuFilter | python | def condense_otus(otuF, nuniqueF):
"""
Traverse the input otu-sequence file, collect the non-unique OTU IDs and
file the sequences associated with them under the unique OTU ID as defined
by the input matrix.
:@type otuF: file
:@param otuF: The output file from QIIME's pick_otus.py
:@type nuniqueF: file
:@param nuniqueF: The matrix of unique OTU IDs associated with the list of
non-unique OTU IDs they replaced.
:@rtype: dict
:@return: The new condensed table of unique OTU IDs and the sequence IDs
associated with them.
"""
uniqueOTUs = set()
nuOTUs = {}
# parse non-unique otu matrix
for line in nuniqueF:
line = line.split()
uOTU = line[0]
for nuOTU in line[1:]:
nuOTUs[nuOTU] = uOTU
uniqueOTUs.add(uOTU)
otuFilter = defaultdict(list)
# parse otu sequence file
for line in otuF:
line = line.split()
otuID, seqIDs = line[0], line[1:]
if otuID in uniqueOTUs:
otuFilter[otuID].extend(seqIDs)
elif otuID in nuOTUs:
otuFilter[nuOTUs[otuID]].extend(seqIDs)
return otuFilter | [
"def",
"condense_otus",
"(",
"otuF",
",",
"nuniqueF",
")",
":",
"uniqueOTUs",
"=",
"set",
"(",
")",
"nuOTUs",
"=",
"{",
"}",
"# parse non-unique otu matrix",
"for",
"line",
"in",
"nuniqueF",
":",
"line",
"=",
"line",
".",
"split",
"(",
")",
"uOTU",
"=",
"line",
"[",
"0",
"]",
"for",
"nuOTU",
"in",
"line",
"[",
"1",
":",
"]",
":",
"nuOTUs",
"[",
"nuOTU",
"]",
"=",
"uOTU",
"uniqueOTUs",
".",
"add",
"(",
"uOTU",
")",
"otuFilter",
"=",
"defaultdict",
"(",
"list",
")",
"# parse otu sequence file",
"for",
"line",
"in",
"otuF",
":",
"line",
"=",
"line",
".",
"split",
"(",
")",
"otuID",
",",
"seqIDs",
"=",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"1",
":",
"]",
"if",
"otuID",
"in",
"uniqueOTUs",
":",
"otuFilter",
"[",
"otuID",
"]",
".",
"extend",
"(",
"seqIDs",
")",
"elif",
"otuID",
"in",
"nuOTUs",
":",
"otuFilter",
"[",
"nuOTUs",
"[",
"otuID",
"]",
"]",
".",
"extend",
"(",
"seqIDs",
")",
"return",
"otuFilter"
] | Traverse the input otu-sequence file, collect the non-unique OTU IDs and
file the sequences associated with them under the unique OTU ID as defined
by the input matrix.
:@type otuF: file
:@param otuF: The output file from QIIME's pick_otus.py
:@type nuniqueF: file
:@param nuniqueF: The matrix of unique OTU IDs associated with the list of
non-unique OTU IDs they replaced.
:@rtype: dict
:@return: The new condensed table of unique OTU IDs and the sequence IDs
associated with them. | [
"Traverse",
"the",
"input",
"otu",
"-",
"sequence",
"file",
"collect",
"the",
"non",
"-",
"unique",
"OTU",
"IDs",
"and",
"file",
"the",
"sequences",
"associated",
"with",
"then",
"under",
"the",
"unique",
"OTU",
"ID",
"as",
"defined",
"by",
"the",
"input",
"matrix",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/pick_otus_condense.py#L14-L51 | train |
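Example — a runnable sketch of condense_otus with in-memory stand-ins for the two input files (io.StringIO is line-iterable, which is all the function needs); since the file lives under bin/ rather than an importable package, this assumes the function has been pasted into scope:

import io

otu_seqs = io.StringIO('otu1 seqA seqB\notu2 seqC\n')  # pick_otus.py output
matrix = io.StringIO('otu1 otu2\n')  # otu1 is unique; otu2 collapses into it
print(dict(condense_otus(otu_seqs, matrix)))
# {'otu1': ['seqA', 'seqB', 'seqC']}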
christophertbrown/bioscripts | ctbBio/rRNA_copies.py | rna_bases | def rna_bases(rna_cov, scaffold, bases, line):
"""
determine if read overlaps with rna, if so count bases
"""
start = int(line[3])
stop = start + bases - 1
if scaffold not in rna_cov:
return rna_cov
for pos in rna_cov[scaffold][2]:
ol = get_overlap([start, stop], pos)
rna_cov[scaffold][0] += ol
return rna_cov | python | def rna_bases(rna_cov, scaffold, bases, line):
"""
determine if read overlaps with rna, if so count bases
"""
start = int(line[3])
stop = start + bases - 1
if scaffold not in rna_cov:
return rna_cov
for pos in rna_cov[scaffold][2]:
ol = get_overlap([start, stop], pos)
rna_cov[scaffold][0] += ol
return rna_cov | [
"def",
"rna_bases",
"(",
"rna_cov",
",",
"scaffold",
",",
"bases",
",",
"line",
")",
":",
"start",
"=",
"int",
"(",
"line",
"[",
"3",
"]",
")",
"stop",
"=",
"start",
"+",
"bases",
"-",
"1",
"if",
"scaffold",
"not",
"in",
"rna_cov",
":",
"return",
"rna_cov",
"for",
"pos",
"in",
"rna_cov",
"[",
"scaffold",
"]",
"[",
"2",
"]",
":",
"ol",
"=",
"get_overlap",
"(",
"[",
"start",
",",
"stop",
"]",
",",
"pos",
")",
"rna_cov",
"[",
"scaffold",
"]",
"[",
"0",
"]",
"+=",
"ol",
"return",
"rna_cov"
] | determine if read overlaps with rna, if so count bases | [
"determine",
"if",
"read",
"overlaps",
"with",
"rna",
"if",
"so",
"count",
"bases"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L18-L29 | train |
christophertbrown/bioscripts | ctbBio/rRNA_copies.py | parse_s2bins | def parse_s2bins(s2bins):
"""
parse ggKbase scaffold-to-bin mapping
- scaffolds-to-bins and bins-to-scaffolds
"""
s2b = {}
b2s = {}
for line in s2bins:
line = line.strip().split()
s, b = line[0], line[1]
if 'UNK' in b:
continue
if len(line) > 2:
g = ' '.join(line[2:])
else:
g = 'n/a'
b = '%s\t%s' % (b, g)
s2b[s] = b
if b not in b2s:
b2s[b] = []
b2s[b].append(s)
return s2b, b2s | python | def parse_s2bins(s2bins):
"""
parse ggKbase scaffold-to-bin mapping
- scaffolds-to-bins and bins-to-scaffolds
"""
s2b = {}
b2s = {}
for line in s2bins:
line = line.strip().split()
s, b = line[0], line[1]
if 'UNK' in b:
continue
if len(line) > 2:
g = ' '.join(line[2:])
else:
g = 'n/a'
b = '%s\t%s' % (b, g)
s2b[s] = b
if b not in b2s:
b2s[b] = []
b2s[b].append(s)
return s2b, b2s | [
"def",
"parse_s2bins",
"(",
"s2bins",
")",
":",
"s2b",
"=",
"{",
"}",
"b2s",
"=",
"{",
"}",
"for",
"line",
"in",
"s2bins",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"s",
",",
"b",
"=",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"1",
"]",
"if",
"'UNK'",
"in",
"b",
":",
"continue",
"if",
"len",
"(",
"line",
")",
">",
"2",
":",
"g",
"=",
"' '",
".",
"join",
"(",
"line",
"[",
"2",
":",
"]",
")",
"else",
":",
"g",
"=",
"'n/a'",
"b",
"=",
"'%s\\t%s'",
"%",
"(",
"b",
",",
"g",
")",
"s2b",
"[",
"s",
"]",
"=",
"b",
"if",
"b",
"not",
"in",
"b2s",
":",
"b2s",
"[",
"b",
"]",
"=",
"[",
"]",
"b2s",
"[",
"b",
"]",
".",
"append",
"(",
"s",
")",
"return",
"s2b",
",",
"b2s"
] | parse ggKbase scaffold-to-bin mapping
- scaffolds-to-bins and bins-to-scaffolds | [
"parse",
"ggKbase",
"scaffold",
"-",
"to",
"-",
"bin",
"mapping",
"-",
"scaffolds",
"-",
"to",
"-",
"bins",
"and",
"bins",
"-",
"to",
"-",
"scaffolds"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L31-L52 | train |
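Example — a sketch of parse_s2bins on a three-line mapping (hypothetical names); bins come back keyed as 'bin<TAB>genus', 'n/a' fills in when no name is given, and UNK bins are dropped:

import io
from ctbBio.rRNA_copies import parse_s2bins

mapping = io.StringIO('scf_1 bin_1 Escherichia coli\nscf_2 bin_1\nscf_3 UNK_bin\n')
s2b, b2s = parse_s2bins(mapping)
print(s2b['scf_1'])    # 'bin_1\tEscherichia coli'
print(s2b['scf_2'])    # 'bin_1\tn/a'
print('scf_3' in s2b)  # False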
christophertbrown/bioscripts | ctbBio/rRNA_copies.py | filter_missing_rna | def filter_missing_rna(s2bins, bins2s, rna_cov):
"""
remove any bins that don't have 16S
"""
for bin, scaffolds in list(bins2s.items()):
c = 0
for s in scaffolds:
if s in rna_cov:
c += 1
if c == 0:
del bins2s[bin]
for scaffold, bin in list(s2bins.items()):
if bin not in bins2s:
del s2bins[scaffold]
return s2bins, bins2s | python | def filter_missing_rna(s2bins, bins2s, rna_cov):
"""
remove any bins that don't have 16S
"""
for bin, scaffolds in list(bins2s.items()):
c = 0
for s in scaffolds:
if s in rna_cov:
c += 1
if c == 0:
del bins2s[bin]
for scaffold, bin in list(s2bins.items()):
if bin not in bins2s:
del s2bins[scaffold]
return s2bins, bins2s | [
"def",
"filter_missing_rna",
"(",
"s2bins",
",",
"bins2s",
",",
"rna_cov",
")",
":",
"for",
"bin",
",",
"scaffolds",
"in",
"list",
"(",
"bins2s",
".",
"items",
"(",
")",
")",
":",
"c",
"=",
"0",
"for",
"s",
"in",
"scaffolds",
":",
"if",
"s",
"in",
"rna_cov",
":",
"c",
"+=",
"1",
"if",
"c",
"==",
"0",
":",
"del",
"bins2s",
"[",
"bin",
"]",
"for",
"scaffold",
",",
"bin",
"in",
"list",
"(",
"s2bins",
".",
"items",
"(",
")",
")",
":",
"if",
"bin",
"not",
"in",
"bins2s",
":",
"del",
"s2bins",
"[",
"scaffold",
"]",
"return",
"s2bins",
",",
"bins2s"
] | remove any bins that don't have 16S | [
"remove",
"any",
"bins",
"that",
"don",
"t",
"have",
"16S"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L76-L90 | train |
christophertbrown/bioscripts | ctbBio/rRNA_copies.py | calc_bin_cov | def calc_bin_cov(scaffolds, cov):
"""
calculate bin coverage
"""
bases = sum([cov[i][0] for i in scaffolds if i in cov])
length = sum([cov[i][1] for i in scaffolds if i in cov])
if length == 0:
return 0
return float(float(bases)/float(length)) | python | def calc_bin_cov(scaffolds, cov):
"""
calculate bin coverage
"""
bases = sum([cov[i][0] for i in scaffolds if i in cov])
length = sum([cov[i][1] for i in scaffolds if i in cov])
if length == 0:
return 0
return float(float(bases)/float(length)) | [
"def",
"calc_bin_cov",
"(",
"scaffolds",
",",
"cov",
")",
":",
"bases",
"=",
"sum",
"(",
"[",
"cov",
"[",
"i",
"]",
"[",
"0",
"]",
"for",
"i",
"in",
"scaffolds",
"if",
"i",
"in",
"cov",
"]",
")",
"length",
"=",
"sum",
"(",
"[",
"cov",
"[",
"i",
"]",
"[",
"1",
"]",
"for",
"i",
"in",
"scaffolds",
"if",
"i",
"in",
"cov",
"]",
")",
"if",
"length",
"==",
"0",
":",
"return",
"0",
"return",
"float",
"(",
"float",
"(",
"bases",
")",
"/",
"float",
"(",
"length",
")",
")"
] | calculate bin coverage | [
"calculate",
"bin",
"coverage"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_copies.py#L92-L100 | train |
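Example — calc_bin_cov divides summed bases by summed lengths and silently skips scaffolds missing from the coverage table; the values below are hypothetical:

from ctbBio.rRNA_copies import calc_bin_cov

cov = {'scf_1': [900, 1000], 'scf_2': [300, 500]}  # scaffold -> [bases, length]
print(calc_bin_cov(['scf_1', 'scf_2', 'scf_absent'], cov))
# (900 + 300) / (1000 + 500) = 0.8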
dokterbob/django-multilingual-model | multilingual_model/forms.py | TranslationFormSet.clean | def clean(self):
"""
Make sure that at least one translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations.
"""
# First make sure the super's clean method is called upon.
super(TranslationFormSet, self).clean()
if settings.HIDE_LANGUAGE:
return
if len(self.forms) > 0:
# If a default language has been provided, make sure a translation
# is available
if settings.DEFAULT_LANGUAGE and not any(self.errors):
# Don't bother validating the formset unless each form is
# valid on its own. Reference:
# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation
for form in self.forms:
language_code = form.cleaned_data.get(
'language_code', None
)
if language_code == settings.DEFAULT_LANGUAGE:
# All is good, don't bother checking any further
return
raise forms.ValidationError(_(
'No translation provided for default language \'%s\'.'
) % settings.DEFAULT_LANGUAGE)
else:
raise forms.ValidationError(
_('At least one translation should be provided.')
) | python | def clean(self):
"""
Make sure that at least one translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations.
"""
# First make sure the super's clean method is called upon.
super(TranslationFormSet, self).clean()
if settings.HIDE_LANGUAGE:
return
if len(self.forms) > 0:
# If a default language has been provided, make sure a translation
# is available
if settings.DEFAULT_LANGUAGE and not any(self.errors):
# Don't bother validating the formset unless each form is
# valid on its own. Reference:
# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation
for form in self.forms:
language_code = form.cleaned_data.get(
'language_code', None
)
if language_code == settings.DEFAULT_LANGUAGE:
# All is good, don't bother checking any further
return
raise forms.ValidationError(_(
'No translation provided for default language \'%s\'.'
) % settings.DEFAULT_LANGUAGE)
else:
raise forms.ValidationError(
_('At least one translation should be provided.')
) | [
"def",
"clean",
"(",
"self",
")",
":",
"# First make sure the super's clean method is called upon.",
"super",
"(",
"TranslationFormSet",
",",
"self",
")",
".",
"clean",
"(",
")",
"if",
"settings",
".",
"HIDE_LANGUAGE",
":",
"return",
"if",
"len",
"(",
"self",
".",
"forms",
")",
">",
"0",
":",
"# If a default language has been provided, make sure a translation",
"# is available",
"if",
"settings",
".",
"DEFAULT_LANGUAGE",
"and",
"not",
"any",
"(",
"self",
".",
"errors",
")",
":",
"# Don't bother validating the formset unless each form is",
"# valid on its own. Reference:",
"# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation",
"for",
"form",
"in",
"self",
".",
"forms",
":",
"language_code",
"=",
"form",
".",
"cleaned_data",
".",
"get",
"(",
"'language_code'",
",",
"None",
")",
"if",
"language_code",
"==",
"settings",
".",
"DEFAULT_LANGUAGE",
":",
"# All is good, don't bother checking any further",
"return",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'No translation provided for default language \\'%s\\'.'",
")",
"%",
"settings",
".",
"DEFAULT_LANGUAGE",
")",
"else",
":",
"raise",
"forms",
".",
"ValidationError",
"(",
"_",
"(",
"'At least one translation should be provided.'",
")",
")"
] | Make sure that at least one translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations. | [
"Make",
"sure",
"there",
"is",
"at",
"least",
"a",
"translation",
"has",
"been",
"filled",
"in",
".",
"If",
"a",
"default",
"language",
"has",
"been",
"specified",
"make",
"sure",
"that",
"it",
"exists",
"amongst",
"translations",
"."
] | 2479b2c3d6f7b697e95aa1e082c8bc8699f1f638 | https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L19-L58 | train |
dokterbob/django-multilingual-model | multilingual_model/forms.py | TranslationFormSet._get_default_language | def _get_default_language(self):
"""
If a default language has been set, and is still available in
`self.available_languages`, return it and remove it from the list.
If not, simply pop the first available language.
"""
assert hasattr(self, 'available_languages'), \
'No available languages have been generated.'
assert len(self.available_languages) > 0, \
'No available languages to select from.'
if (
settings.DEFAULT_LANGUAGE and
settings.DEFAULT_LANGUAGE in self.available_languages
) or (
'language_code' not in self.form.base_fields
):
# Default language still available
self.available_languages.remove(settings.DEFAULT_LANGUAGE)
return settings.DEFAULT_LANGUAGE
else:
# Select the first item and return it
return self.available_languages.pop(0) | python | def _get_default_language(self):
"""
If a default language has been set, and is still available in
`self.available_languages`, return it and remove it from the list.
If not, simply pop the first available language.
"""
assert hasattr(self, 'available_languages'), \
'No available languages have been generated.'
assert len(self.available_languages) > 0, \
'No available languages to select from.'
if (
settings.DEFAULT_LANGUAGE and
settings.DEFAULT_LANGUAGE in self.available_languages
) or (
'language_code' not in self.form.base_fields
):
# Default language still available
self.available_languages.remove(settings.DEFAULT_LANGUAGE)
return settings.DEFAULT_LANGUAGE
else:
# Select the first item and return it
return self.available_languages.pop(0) | [
"def",
"_get_default_language",
"(",
"self",
")",
":",
"assert",
"hasattr",
"(",
"self",
",",
"'available_languages'",
")",
",",
"'No available languages have been generated.'",
"assert",
"len",
"(",
"self",
".",
"available_languages",
")",
">",
"0",
",",
"'No available languages to select from.'",
"if",
"(",
"settings",
".",
"DEFAULT_LANGUAGE",
"and",
"settings",
".",
"DEFAULT_LANGUAGE",
"in",
"self",
".",
"available_languages",
")",
"or",
"(",
"'language_code'",
"not",
"in",
"self",
".",
"form",
".",
"base_fields",
")",
":",
"# Default language still available",
"self",
".",
"available_languages",
".",
"remove",
"(",
"settings",
".",
"DEFAULT_LANGUAGE",
")",
"return",
"settings",
".",
"DEFAULT_LANGUAGE",
"else",
":",
"# Select the first item and return it",
"return",
"self",
".",
"available_languages",
".",
"pop",
"(",
"0",
")"
] | If a default language has been set, and is still available in
`self.available_languages`, return it and remove it from the list.
If not, simply pop the first available language. | [
"If",
"a",
"default",
"language",
"has",
"been",
"set",
"and",
"is",
"still",
"available",
"in",
"self",
".",
"available_languages",
"return",
"it",
"and",
"remove",
"it",
"from",
"the",
"list",
"."
] | 2479b2c3d6f7b697e95aa1e082c8bc8699f1f638 | https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L68-L94 | train |
dokterbob/django-multilingual-model | multilingual_model/forms.py | TranslationFormSet._construct_form | def _construct_form(self, i, **kwargs):
"""
Construct the form, overriding the initial value for `language_code`.
"""
if not settings.HIDE_LANGUAGE:
self._construct_available_languages()
form = super(TranslationFormSet, self)._construct_form(i, **kwargs)
if settings.HIDE_LANGUAGE:
form.instance.language_code = settings.DEFAULT_LANGUAGE
else:
language_code = form.instance.language_code
if language_code:
logger.debug(
u'Removing translation choice %s for instance %s'
u' in form %d', language_code, form.instance, i
)
self.available_languages.remove(language_code)
else:
initial_language_code = self._get_default_language()
logger.debug(
u'Preselecting language code %s for form %d',
initial_language_code, i
)
form.initial['language_code'] = initial_language_code
return form | python | def _construct_form(self, i, **kwargs):
"""
Construct the form, overriding the initial value for `language_code`.
"""
if not settings.HIDE_LANGUAGE:
self._construct_available_languages()
form = super(TranslationFormSet, self)._construct_form(i, **kwargs)
if settings.HIDE_LANGUAGE:
form.instance.language_code = settings.DEFAULT_LANGUAGE
else:
language_code = form.instance.language_code
if language_code:
logger.debug(
u'Removing translation choice %s for instance %s'
u' in form %d', language_code, form.instance, i
)
self.available_languages.remove(language_code)
else:
initial_language_code = self._get_default_language()
logger.debug(
u'Preselecting language code %s for form %d',
initial_language_code, i
)
form.initial['language_code'] = initial_language_code
return form | [
"def",
"_construct_form",
"(",
"self",
",",
"i",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"settings",
".",
"HIDE_LANGUAGE",
":",
"self",
".",
"_construct_available_languages",
"(",
")",
"form",
"=",
"super",
"(",
"TranslationFormSet",
",",
"self",
")",
".",
"_construct_form",
"(",
"i",
",",
"*",
"*",
"kwargs",
")",
"if",
"settings",
".",
"HIDE_LANGUAGE",
":",
"form",
".",
"instance",
".",
"language_code",
"=",
"settings",
".",
"DEFAULT_LANGUAGE",
"else",
":",
"language_code",
"=",
"form",
".",
"instance",
".",
"language_code",
"if",
"language_code",
":",
"logger",
".",
"debug",
"(",
"u'Removing translation choice %s for instance %s'",
"u' in form %d'",
",",
"language_code",
",",
"form",
".",
"instance",
",",
"i",
")",
"self",
".",
"available_languages",
".",
"remove",
"(",
"language_code",
")",
"else",
":",
"initial_language_code",
"=",
"self",
".",
"_get_default_language",
"(",
")",
"logger",
".",
"debug",
"(",
"u'Preselecting language code %s for form %d'",
",",
"initial_language_code",
",",
"i",
")",
"form",
".",
"initial",
"[",
"'language_code'",
"]",
"=",
"initial_language_code",
"return",
"form"
] | Construct the form, overriding the initial value for `language_code`. | [
"Construct",
"the",
"form",
"overriding",
"the",
"initial",
"value",
"for",
"language_code",
"."
] | 2479b2c3d6f7b697e95aa1e082c8bc8699f1f638 | https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/forms.py#L96-L128 | train |
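Example — a hedged sketch of wiring the formset above into a Django inline formset; Book and BookTranslation are hypothetical models, not part of this excerpt:

from django.forms.models import inlineformset_factory
from multilingual_model.forms import TranslationFormSet

BookTranslationFormSet = inlineformset_factory(
    Book, BookTranslation, formset=TranslationFormSet, exclude=[])
formset = BookTranslationFormSet(instance=book)  # book: an existing Book instance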
christophertbrown/bioscripts | ctbBio/fastq_merge.py | fq_merge | def fq_merge(R1, R2):
"""
merge separate fastq files
"""
c = itertools.cycle([1, 2, 3, 4])
for r1, r2 in zip(R1, R2):
n = next(c)
if n == 1:
pair = [[], []]
pair[0].append(r1.strip())
pair[1].append(r2.strip())
if n == 4:
yield pair | python | def fq_merge(R1, R2):
"""
merge separate fastq files
"""
c = itertools.cycle([1, 2, 3, 4])
for r1, r2 in zip(R1, R2):
n = next(c)
if n == 1:
pair = [[], []]
pair[0].append(r1.strip())
pair[1].append(r2.strip())
if n == 4:
yield pair | [
"def",
"fq_merge",
"(",
"R1",
",",
"R2",
")",
":",
"c",
"=",
"itertools",
".",
"cycle",
"(",
"[",
"1",
",",
"2",
",",
"3",
",",
"4",
"]",
")",
"for",
"r1",
",",
"r2",
"in",
"zip",
"(",
"R1",
",",
"R2",
")",
":",
"n",
"=",
"next",
"(",
"c",
")",
"if",
"n",
"==",
"1",
":",
"pair",
"=",
"[",
"[",
"]",
",",
"[",
"]",
"]",
"pair",
"[",
"0",
"]",
".",
"append",
"(",
"r1",
".",
"strip",
"(",
")",
")",
"pair",
"[",
"1",
"]",
".",
"append",
"(",
"r2",
".",
"strip",
"(",
")",
")",
"if",
"n",
"==",
"4",
":",
"yield",
"pair"
] | merge separate fastq files | [
"merge",
"separate",
"fastq",
"files"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fastq_merge.py#L13-L25 | train |
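Example — fq_merge yields one [R1-record, R2-record] pair per four lines read from each file; a runnable sketch with a single in-memory read pair:

import io
from ctbBio.fastq_merge import fq_merge

r1 = io.StringIO('@read1/1\nACGT\n+\nIIII\n')
r2 = io.StringIO('@read1/2\nTTGG\n+\nIIII\n')
for pair in fq_merge(r1, r2):
    print(pair)
# [['@read1/1', 'ACGT', '+', 'IIII'], ['@read1/2', 'TTGG', '+', 'IIII']]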
disqus/nydus | nydus/contrib/ketama.py | Ketama._build_circle | def _build_circle(self):
"""
Creates hash ring.
"""
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort() | python | def _build_circle(self):
"""
Creates hash ring.
"""
total_weight = 0
for node in self._nodes:
total_weight += self._weights.get(node, 1)
for node in self._nodes:
weight = self._weights.get(node, 1)
ks = math.floor((40 * len(self._nodes) * weight) / total_weight)
for i in xrange(0, int(ks)):
b_key = self._md5_digest('%s-%s-salt' % (node, i))
for l in xrange(0, 4):
key = ((b_key[3 + l * 4] << 24)
| (b_key[2 + l * 4] << 16)
| (b_key[1 + l * 4] << 8)
| b_key[l * 4])
self._hashring[key] = node
self._sorted_keys.append(key)
self._sorted_keys.sort() | [
"def",
"_build_circle",
"(",
"self",
")",
":",
"total_weight",
"=",
"0",
"for",
"node",
"in",
"self",
".",
"_nodes",
":",
"total_weight",
"+=",
"self",
".",
"_weights",
".",
"get",
"(",
"node",
",",
"1",
")",
"for",
"node",
"in",
"self",
".",
"_nodes",
":",
"weight",
"=",
"self",
".",
"_weights",
".",
"get",
"(",
"node",
",",
"1",
")",
"ks",
"=",
"math",
".",
"floor",
"(",
"(",
"40",
"*",
"len",
"(",
"self",
".",
"_nodes",
")",
"*",
"weight",
")",
"/",
"total_weight",
")",
"for",
"i",
"in",
"xrange",
"(",
"0",
",",
"int",
"(",
"ks",
")",
")",
":",
"b_key",
"=",
"self",
".",
"_md5_digest",
"(",
"'%s-%s-salt'",
"%",
"(",
"node",
",",
"i",
")",
")",
"for",
"l",
"in",
"xrange",
"(",
"0",
",",
"4",
")",
":",
"key",
"=",
"(",
"(",
"b_key",
"[",
"3",
"+",
"l",
"*",
"4",
"]",
"<<",
"24",
")",
"|",
"(",
"b_key",
"[",
"2",
"+",
"l",
"*",
"4",
"]",
"<<",
"16",
")",
"|",
"(",
"b_key",
"[",
"1",
"+",
"l",
"*",
"4",
"]",
"<<",
"8",
")",
"|",
"b_key",
"[",
"l",
"*",
"4",
"]",
")",
"self",
".",
"_hashring",
"[",
"key",
"]",
"=",
"node",
"self",
".",
"_sorted_keys",
".",
"append",
"(",
"key",
")",
"self",
".",
"_sorted_keys",
".",
"sort",
"(",
")"
] | Creates hash ring. | [
"Creates",
"hash",
"ring",
"."
] | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L35-L60 | train |
disqus/nydus | nydus/contrib/ketama.py | Ketama._gen_key | def _gen_key(self, key):
"""
Return long integer for a given key, that represents its place on
the hash ring.
"""
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x) | python | def _gen_key(self, key):
"""
Return long integer for a given key, that represents its place on
the hash ring.
"""
b_key = self._md5_digest(key)
return self._hashi(b_key, lambda x: x) | [
"def",
"_gen_key",
"(",
"self",
",",
"key",
")",
":",
"b_key",
"=",
"self",
".",
"_md5_digest",
"(",
"key",
")",
"return",
"self",
".",
"_hashi",
"(",
"b_key",
",",
"lambda",
"x",
":",
"x",
")"
] | Return long integer for a given key, that represents its place on
the hash ring. | [
"Return",
"long",
"integer",
"for",
"a",
"given",
"key",
"that",
"represent",
"it",
"place",
"on",
"the",
"hash",
"ring",
"."
] | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/contrib/ketama.py#L78-L84 | train |
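Example — this excerpt stops at _gen_key, but given _build_circle and _gen_key a ring lookup is typically a bisect over _sorted_keys; node_for_key below is a hypothetical sketch over a Ketama instance, not Ketama's actual API:

import bisect

def node_for_key(ring, key):
    h = ring._gen_key(key)                     # position of the key on the ring
    pos = bisect.bisect(ring._sorted_keys, h)  # first ring point past the key
    if pos == len(ring._sorted_keys):          # wrap around the circle
        pos = 0
    return ring._hashring[ring._sorted_keys[pos]]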
scottrice/pysteam | pysteam/grid.py | has_custom_image | def has_custom_image(user_context, app_id):
"""Returns True if there exists a custom image for app_id."""
possible_paths = _valid_custom_image_paths(user_context, app_id)
return any(map(os.path.exists, possible_paths)) | python | def has_custom_image(user_context, app_id):
"""Returns True if there exists a custom image for app_id."""
possible_paths = _valid_custom_image_paths(user_context, app_id)
return any(map(os.path.exists, possible_paths)) | [
"def",
"has_custom_image",
"(",
"user_context",
",",
"app_id",
")",
":",
"possible_paths",
"=",
"_valid_custom_image_paths",
"(",
"user_context",
",",
"app_id",
")",
"return",
"any",
"(",
"map",
"(",
"os",
".",
"path",
".",
"exists",
",",
"possible_paths",
")",
")"
] | Returns True if there exists a custom image for app_id. | [
"Returns",
"True",
"if",
"there",
"exists",
"a",
"custom",
"image",
"for",
"app_id",
"."
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/grid.py#L32-L35 | train |
scottrice/pysteam | pysteam/grid.py | get_custom_image | def get_custom_image(user_context, app_id):
"""Returns the custom image associated with a given app. If there are
multiple candidate images on disk, one is chosen arbitrarily."""
possible_paths = _valid_custom_image_paths(user_context, app_id)
existing_images = filter(os.path.exists, possible_paths)
if len(existing_images) > 0:
return existing_images[0] | python | def get_custom_image(user_context, app_id):
"""Returns the custom image associated with a given app. If there are
multiple candidate images on disk, one is chosen arbitrarily."""
possible_paths = _valid_custom_image_paths(user_context, app_id)
existing_images = filter(os.path.exists, possible_paths)
if len(existing_images) > 0:
return existing_images[0] | [
"def",
"get_custom_image",
"(",
"user_context",
",",
"app_id",
")",
":",
"possible_paths",
"=",
"_valid_custom_image_paths",
"(",
"user_context",
",",
"app_id",
")",
"existing_images",
"=",
"filter",
"(",
"os",
".",
"path",
".",
"exists",
",",
"possible_paths",
")",
"if",
"len",
"(",
"existing_images",
")",
">",
"0",
":",
"return",
"existing_images",
"[",
"0",
"]"
] | Returns the custom image associated with a given app. If there are
multiple candidate images on disk, one is chosen arbitrarily. | [
"Returns",
"the",
"custom",
"image",
"associated",
"with",
"a",
"given",
"app",
".",
"If",
"there",
"are",
"multiple",
"candidate",
"images",
"on",
"disk",
"one",
"is",
"chosen",
"arbitrarily",
"."
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/grid.py#L37-L43 | train |
scottrice/pysteam | pysteam/grid.py | set_custom_image | def set_custom_image(user_context, app_id, image_path):
"""Sets the custom image for `app_id` to be the image located at
`image_path`. If there already exists a custom image for `app_id` it will
be deleted. Returns True if setting the image was successful."""
if image_path is None:
return False
if not os.path.exists(image_path):
return False
(root, ext) = os.path.splitext(image_path)
if not is_valid_extension(ext):
# TODO: Maybe log that this happened?
return False
# If we don't remove the old image then there's no guarantee that Steam will
# show our new image when it launches.
if has_custom_image(user_context, app_id):
img = get_custom_image(user_context, app_id)
assert(img is not None)
os.remove(img)
# Set the new image
parent_dir = paths.custom_images_directory(user_context)
new_path = os.path.join(parent_dir, app_id + ext)
shutil.copyfile(image_path, new_path)
return True | python | def set_custom_image(user_context, app_id, image_path):
"""Sets the custom image for `app_id` to be the image located at
`image_path`. If there already exists a custom image for `app_id` it will
be deleted. Returns True if setting the image was successful."""
if image_path is None:
return False
if not os.path.exists(image_path):
return False
(root, ext) = os.path.splitext(image_path)
if not is_valid_extension(ext):
# TODO: Maybe log that this happened?
return False
# If we don't remove the old image then there's no guarantee that Steam will
# show our new image when it launches.
if has_custom_image(user_context, app_id):
img = get_custom_image(user_context, app_id)
assert(img is not None)
os.remove(img)
# Set the new image
parent_dir = paths.custom_images_directory(user_context)
new_path = os.path.join(parent_dir, app_id + ext)
shutil.copyfile(image_path, new_path)
return True | [
"def",
"set_custom_image",
"(",
"user_context",
",",
"app_id",
",",
"image_path",
")",
":",
"if",
"image_path",
"is",
"None",
":",
"return",
"False",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"image_path",
")",
":",
"return",
"False",
"(",
"root",
",",
"ext",
")",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"image_path",
")",
"if",
"not",
"is_valid_extension",
"(",
"ext",
")",
":",
"# TODO: Maybe log that this happened?",
"return",
"False",
"# If we don't remove the old image then theres no guarantee that Steam will",
"# show our new image when it launches.",
"if",
"has_custom_image",
"(",
"user_context",
",",
"app_id",
")",
":",
"img",
"=",
"get_custom_image",
"(",
"user_context",
",",
"app_id",
")",
"assert",
"(",
"img",
"is",
"not",
"None",
")",
"os",
".",
"remove",
"(",
"img",
")",
"# Set the new image",
"parent_dir",
"=",
"paths",
".",
"custom_images_directory",
"(",
"user_context",
")",
"new_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"parent_dir",
",",
"app_id",
"+",
"ext",
")",
"shutil",
".",
"copyfile",
"(",
"image_path",
",",
"new_path",
")",
"return",
"True"
] | Sets the custom image for `app_id` to be the image located at
`image_path`. If there already exists a custom image for `app_id` it will
be deleted. Returns True if setting the image was successful. | [
"Sets",
"the",
"custom",
"image",
"for",
"app_id",
"to",
"be",
"the",
"image",
"located",
"at",
"image_path",
".",
"If",
"there",
"already",
"exists",
"a",
"custom",
"image",
"for",
"app_id",
"it",
"will",
"be",
"deleted",
".",
"Returns",
"True",
"is",
"setting",
"the",
"image",
"was",
"successful",
"."
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/grid.py#L45-L70 | train |
cldf/segments | src/segments/profile.py | Profile.from_file | def from_file(cls, fname, form=None):
"""
Read an orthography profile from a metadata file or a default tab-separated profile file.
"""
try:
tg = TableGroup.from_file(fname)
opfname = None
except JSONDecodeError:
tg = TableGroup.fromvalue(cls.MD)
opfname = fname
if len(tg.tables) != 1:
raise ValueError('profile description must contain exactly one table')
metadata = tg.common_props
metadata.update(fname=Path(fname), form=form)
return cls(
*[{k: None if (k != cls.GRAPHEME_COL and v == cls.NULL) else v for k, v in d.items()}
for d in tg.tables[0].iterdicts(fname=opfname)],
**metadata) | python | def from_file(cls, fname, form=None):
"""
Read an orthography profile from a metadata file or a default tab-separated profile file.
"""
try:
tg = TableGroup.from_file(fname)
opfname = None
except JSONDecodeError:
tg = TableGroup.fromvalue(cls.MD)
opfname = fname
if len(tg.tables) != 1:
raise ValueError('profile description must contain exactly one table')
metadata = tg.common_props
metadata.update(fname=Path(fname), form=form)
return cls(
*[{k: None if (k != cls.GRAPHEME_COL and v == cls.NULL) else v for k, v in d.items()}
for d in tg.tables[0].iterdicts(fname=opfname)],
**metadata) | [
"def",
"from_file",
"(",
"cls",
",",
"fname",
",",
"form",
"=",
"None",
")",
":",
"try",
":",
"tg",
"=",
"TableGroup",
".",
"from_file",
"(",
"fname",
")",
"opfname",
"=",
"None",
"except",
"JSONDecodeError",
":",
"tg",
"=",
"TableGroup",
".",
"fromvalue",
"(",
"cls",
".",
"MD",
")",
"opfname",
"=",
"fname",
"if",
"len",
"(",
"tg",
".",
"tables",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"'profile description must contain exactly one table'",
")",
"metadata",
"=",
"tg",
".",
"common_props",
"metadata",
".",
"update",
"(",
"fname",
"=",
"Path",
"(",
"fname",
")",
",",
"form",
"=",
"form",
")",
"return",
"cls",
"(",
"*",
"[",
"{",
"k",
":",
"None",
"if",
"(",
"k",
"!=",
"cls",
".",
"GRAPHEME_COL",
"and",
"v",
"==",
"cls",
".",
"NULL",
")",
"else",
"v",
"for",
"k",
",",
"v",
"in",
"d",
".",
"items",
"(",
")",
"}",
"for",
"d",
"in",
"tg",
".",
"tables",
"[",
"0",
"]",
".",
"iterdicts",
"(",
"fname",
"=",
"opfname",
")",
"]",
",",
"*",
"*",
"metadata",
")"
] | Read an orthography profile from a metadata file or a default tab-separated profile file. | [
"Read",
"an",
"orthography",
"profile",
"from",
"a",
"metadata",
"file",
"or",
"a",
"default",
"tab",
"-",
"separated",
"profile",
"file",
"."
] | 9136a4ec89555bf9b574399ffbb07f3cc9a9f45f | https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/profile.py#L100-L117 | train |
cldf/segments | src/segments/profile.py | Profile.from_text | def from_text(cls, text, mapping='mapping'):
"""
Create a Profile instance from the Unicode graphemes found in `text`.
Parameters
----------
text
mapping
Returns
-------
A Profile instance.
"""
graphemes = Counter(grapheme_pattern.findall(text))
specs = [
OrderedDict([
(cls.GRAPHEME_COL, grapheme),
('frequency', frequency),
(mapping, grapheme)])
for grapheme, frequency in graphemes.most_common()]
return cls(*specs) | python | def from_text(cls, text, mapping='mapping'):
"""
Create a Profile instance from the Unicode graphemes found in `text`.
Parameters
----------
text
mapping
Returns
-------
A Profile instance.
"""
graphemes = Counter(grapheme_pattern.findall(text))
specs = [
OrderedDict([
(cls.GRAPHEME_COL, grapheme),
('frequency', frequency),
(mapping, grapheme)])
for grapheme, frequency in graphemes.most_common()]
return cls(*specs) | [
"def",
"from_text",
"(",
"cls",
",",
"text",
",",
"mapping",
"=",
"'mapping'",
")",
":",
"graphemes",
"=",
"Counter",
"(",
"grapheme_pattern",
".",
"findall",
"(",
"text",
")",
")",
"specs",
"=",
"[",
"OrderedDict",
"(",
"[",
"(",
"cls",
".",
"GRAPHEME_COL",
",",
"grapheme",
")",
",",
"(",
"'frequency'",
",",
"frequency",
")",
",",
"(",
"mapping",
",",
"grapheme",
")",
"]",
")",
"for",
"grapheme",
",",
"frequency",
"in",
"graphemes",
".",
"most_common",
"(",
")",
"]",
"return",
"cls",
"(",
"*",
"specs",
")"
] | Create a Profile instance from the Unicode graphemes found in `text`.
Parameters
----------
text
mapping
Returns
-------
A Profile instance. | [
"Create",
"a",
"Profile",
"instance",
"from",
"the",
"Unicode",
"graphemes",
"found",
"in",
"text",
"."
] | 9136a4ec89555bf9b574399ffbb07f3cc9a9f45f | https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/profile.py#L120-L141 | train |
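Example — from_text builds one profile row per Unicode grapheme, ordered by descending frequency; the import path follows this record's file layout:

from segments.profile import Profile

prof = Profile.from_text('aabch aabch x')
# each spec row holds the grapheme (under Profile.GRAPHEME_COL), its
# frequency, and an identity 'mapping' -- e.g. 'a' appears 4 times here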
christophertbrown/bioscripts | ctbBio/name2fasta.py | split_fasta | def split_fasta(f, id2f):
"""
split fasta file into separate fasta files based on list of scaffolds
that belong to each separate file
"""
opened = {}
for seq in parse_fasta(f):
id = seq[0].split('>')[1].split()[0]
if id not in id2f:
continue
fasta = id2f[id]
if fasta not in opened:
opened[fasta] = '%s.fa' % fasta
seq[1] += '\n'
with open(opened[fasta], 'a+') as f_out:
f_out.write('\n'.join(seq)) | python | def split_fasta(f, id2f):
"""
split fasta file into separate fasta files based on list of scaffolds
that belong to each separate file
"""
opened = {}
for seq in parse_fasta(f):
id = seq[0].split('>')[1].split()[0]
if id not in id2f:
continue
fasta = id2f[id]
if fasta not in opened:
opened[fasta] = '%s.fa' % fasta
seq[1] += '\n'
with open(opened[fasta], 'a+') as f_out:
f_out.write('\n'.join(seq)) | [
"def",
"split_fasta",
"(",
"f",
",",
"id2f",
")",
":",
"opened",
"=",
"{",
"}",
"for",
"seq",
"in",
"parse_fasta",
"(",
"f",
")",
":",
"id",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"if",
"id",
"not",
"in",
"id2f",
":",
"continue",
"fasta",
"=",
"id2f",
"[",
"id",
"]",
"if",
"fasta",
"not",
"in",
"opened",
":",
"opened",
"[",
"fasta",
"]",
"=",
"'%s.fa'",
"%",
"fasta",
"seq",
"[",
"1",
"]",
"+=",
"'\\n'",
"with",
"open",
"(",
"opened",
"[",
"fasta",
"]",
",",
"'a+'",
")",
"as",
"f_out",
":",
"f_out",
".",
"write",
"(",
"'\\n'",
".",
"join",
"(",
"seq",
")",
")"
] | split fasta file into separate fasta files based on list of scaffolds
that belong to each separate file | [
"split",
"fasta",
"file",
"into",
"separate",
"fasta",
"files",
"based",
"on",
"list",
"of",
"scaffolds",
"that",
"belong",
"to",
"each",
"separate",
"file"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/name2fasta.py#L7-L22 | train |
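Example — a usage sketch for split_fasta; the file names are hypothetical, and each scaffold listed in id2f is appended to '<name>.fa' in the working directory:

from ctbBio.name2fasta import split_fasta

id2f = {'scaffold_1': 'bin1', 'scaffold_2': 'bin2'}  # scaffold -> output fasta
with open('assembly.fa') as fasta:  # hypothetical input assembly
    split_fasta(fasta, id2f)        # appends records to bin1.fa and bin2.fa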
scottrice/pysteam | pysteam/legacy/steam.py | Steam._is_user_directory | def _is_user_directory(self, pathname):
"""Check whether `pathname` is a valid user data directory
This method is meant to be called on the contents of the userdata dir.
As such, it will return True when `pathname` refers to a directory name
that can be interpreted as a users' userID.
"""
fullpath = os.path.join(self.userdata_location(), pathname)
# SteamOS puts a directory named 'anonymous' in the userdata directory
# by default. Since we assume that pathname is a userID, ignore any name
# that can't be converted to a number
return os.path.isdir(fullpath) and pathname.isdigit() | python | def _is_user_directory(self, pathname):
"""Check whether `pathname` is a valid user data directory
This method is meant to be called on the contents of the userdata dir.
As such, it will return True when `pathname` refers to a directory name
that can be interpreted as a users' userID.
"""
fullpath = os.path.join(self.userdata_location(), pathname)
# SteamOS puts a directory named 'anonymous' in the userdata directory
# by default. Since we assume that pathname is a userID, ignore any name
# that can't be converted to a number
return os.path.isdir(fullpath) and pathname.isdigit() | [
"def",
"_is_user_directory",
"(",
"self",
",",
"pathname",
")",
":",
"fullpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"userdata_location",
"(",
")",
",",
"pathname",
")",
"# SteamOS puts a directory named 'anonymous' in the userdata directory",
"# by default. Since we assume that pathname is a userID, ignore any name",
"# that can't be converted to a number",
"return",
"os",
".",
"path",
".",
"isdir",
"(",
"fullpath",
")",
"and",
"pathname",
".",
"isdigit",
"(",
")"
] | Check whether `pathname` is a valid user data directory
This method is meant to be called on the contents of the userdata dir.
As such, it will return True when `pathname` refers to a directory name
that can be interpreted as a users' userID. | [
"Check",
"whether",
"pathname",
"is",
"a",
"valid",
"user",
"data",
"directory"
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/steam.py#L47-L58 | train |
scottrice/pysteam | pysteam/legacy/steam.py | Steam.local_users | def local_users(self):
"""Returns an array of user ids for users on the filesystem"""
# Any users on the machine will have an entry inside of the userdata
# folder. As such, the easiest way to find a list of all users on the
# machine is to just list the folders inside userdata
userdirs = filter(self._is_user_directory, os.listdir(self.userdata_location()))
# Exploits the fact that the directory is named the same as the user id
return map(lambda userdir: user.User(self, int(userdir)), userdirs) | python | def local_users(self):
"""Returns an array of user ids for users on the filesystem"""
# Any users on the machine will have an entry inside of the userdata
# folder. As such, the easiest way to find a list of all users on the
# machine is to just list the folders inside userdata
userdirs = filter(self._is_user_directory, os.listdir(self.userdata_location()))
# Exploits the fact that the directory is named the same as the user id
return map(lambda userdir: user.User(self, int(userdir)), userdirs) | [
"def",
"local_users",
"(",
"self",
")",
":",
"# Any users on the machine will have an entry inside of the userdata",
"# folder. As such, the easiest way to find a list of all users on the",
"# machine is to just list the folders inside userdata",
"userdirs",
"=",
"filter",
"(",
"self",
".",
"_is_user_directory",
",",
"os",
".",
"listdir",
"(",
"self",
".",
"userdata_location",
"(",
")",
")",
")",
"# Exploits the fact that the directory is named the same as the user id",
"return",
"map",
"(",
"lambda",
"userdir",
":",
"user",
".",
"User",
"(",
"self",
",",
"int",
"(",
"userdir",
")",
")",
",",
"userdirs",
")"
] | Returns an array of user ids for users on the filesystem | [
"Returns",
"an",
"array",
"of",
"user",
"ids",
"for",
"users",
"on",
"the",
"filesystem"
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/steam.py#L80-L87 | train |
opengridcc/opengrid | opengrid/library/weather.py | _calculate_degree_days | def _calculate_degree_days(temperature_equivalent, base_temperature, cooling=False):
"""
Calculates degree days, starting with a series of temperature equivalent values
Parameters
----------
temperature_equivalent : Pandas Series
base_temperature : float
cooling : bool
Set True if you want cooling degree days instead of heating degree days
Returns
-------
Pandas Series called HDD_base_temperature for heating degree days or
CDD_base_temperature for cooling degree days.
"""
if cooling:
ret = temperature_equivalent - base_temperature
else:
ret = base_temperature - temperature_equivalent
# degree days cannot be negative
ret[ret < 0] = 0
prefix = 'CDD' if cooling else 'HDD'
ret.name = '{}_{}'.format(prefix, base_temperature)
return ret | python | def _calculate_degree_days(temperature_equivalent, base_temperature, cooling=False):
"""
Calculates degree days, starting with a series of temperature equivalent values
Parameters
----------
temperature_equivalent : Pandas Series
base_temperature : float
cooling : bool
Set True if you want cooling degree days instead of heating degree days
Returns
-------
Pandas Series called HDD_base_temperature for heating degree days or
CDD_base_temperature for cooling degree days.
"""
if cooling:
ret = temperature_equivalent - base_temperature
else:
ret = base_temperature - temperature_equivalent
# degree days cannot be negative
ret[ret < 0] = 0
prefix = 'CDD' if cooling else 'HDD'
ret.name = '{}_{}'.format(prefix, base_temperature)
return ret | [
"def",
"_calculate_degree_days",
"(",
"temperature_equivalent",
",",
"base_temperature",
",",
"cooling",
"=",
"False",
")",
":",
"if",
"cooling",
":",
"ret",
"=",
"temperature_equivalent",
"-",
"base_temperature",
"else",
":",
"ret",
"=",
"base_temperature",
"-",
"temperature_equivalent",
"# degree days cannot be negative",
"ret",
"[",
"ret",
"<",
"0",
"]",
"=",
"0",
"prefix",
"=",
"'CDD'",
"if",
"cooling",
"else",
"'HDD'",
"ret",
".",
"name",
"=",
"'{}_{}'",
".",
"format",
"(",
"prefix",
",",
"base_temperature",
")",
"return",
"ret"
] | Calculates degree days, starting with a series of temperature equivalent values
Parameters
----------
temperature_equivalent : Pandas Series
base_temperature : float
cooling : bool
Set True if you want cooling degree days instead of heating degree days
Returns
-------
Pandas Series called HDD_base_temperature for heating degree days or
CDD_base_temperature for cooling degree days. | [
"Calculates",
"degree",
"days",
"starting",
"with",
"a",
"series",
"of",
"temperature",
"equivalent",
"values"
] | 69b8da3c8fcea9300226c45ef0628cd6d4307651 | https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/weather.py#L31-L59 | train |
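Example — a runnable sketch with pandas, using a base temperature of 16.5 (a common heating base; the value is only illustrative):

import pandas as pd
from opengrid.library.weather import _calculate_degree_days

temp_eq = pd.Series([2.0, 9.5, 18.0],
                    index=pd.date_range('2017-01-01', periods=3))
hdd = _calculate_degree_days(temp_eq, 16.5)
print(hdd.name)      # 'HDD_16.5'
print(hdd.tolist())  # [14.5, 7.0, 0.0] -- negative degree days clipped to 0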
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers.status | def status(self):
"""Development status."""
return {self._acronym_status(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_status)} | python | def status(self):
"""Development status."""
return {self._acronym_status(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_status)} | [
"def",
"status",
"(",
"self",
")",
":",
"return",
"{",
"self",
".",
"_acronym_status",
"(",
"l",
")",
":",
"l",
"for",
"l",
"in",
"self",
".",
"resp_text",
".",
"split",
"(",
"'\\n'",
")",
"if",
"l",
".",
"startswith",
"(",
"self",
".",
"prefix_status",
")",
"}"
] | Development status. | [
"Development",
"status",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L33-L36 | train |
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers.licenses | def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | python | def licenses(self):
"""OSI Approved license."""
return {self._acronym_lic(l): l for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | [
"def",
"licenses",
"(",
"self",
")",
":",
"return",
"{",
"self",
".",
"_acronym_lic",
"(",
"l",
")",
":",
"l",
"for",
"l",
"in",
"self",
".",
"resp_text",
".",
"split",
"(",
"'\\n'",
")",
"if",
"l",
".",
"startswith",
"(",
"self",
".",
"prefix_lic",
")",
"}"
] | OSI Approved license. | [
"OSI",
"Approved",
"license",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L43-L46 | train |
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers.licenses_desc | def licenses_desc(self):
"""Remove prefix."""
return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | python | def licenses_desc(self):
"""Remove prefix."""
return {self._acronym_lic(l): l.split(self.prefix_lic)[1]
for l in self.resp_text.split('\n')
if l.startswith(self.prefix_lic)} | [
"def",
"licenses_desc",
"(",
"self",
")",
":",
"return",
"{",
"self",
".",
"_acronym_lic",
"(",
"l",
")",
":",
"l",
".",
"split",
"(",
"self",
".",
"prefix_lic",
")",
"[",
"1",
"]",
"for",
"l",
"in",
"self",
".",
"resp_text",
".",
"split",
"(",
"'\\n'",
")",
"if",
"l",
".",
"startswith",
"(",
"self",
".",
"prefix_lic",
")",
"}"
] | Remove prefix. | [
"Remove",
"prefix",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L48-L52 | train |
mkouhei/bootstrap-py | bootstrap_py/classifiers.py | Classifiers._acronym_lic | def _acronym_lic(self, license_statement):
"""Convert license acronym."""
pat = re.compile(r'\(([\w+\W?\s?]+)\)')
if pat.search(license_statement):
lic = pat.search(license_statement).group(1)
if lic.startswith('CNRI'):
acronym_licence = lic[:4]
else:
acronym_licence = lic.replace(' ', '')
else:
acronym_licence = ''.join(
[w[0]
for w in license_statement.split(self.prefix_lic)[1].split()])
return acronym_licence | python | def _acronym_lic(self, license_statement):
"""Convert license acronym."""
pat = re.compile(r'\(([\w+\W?\s?]+)\)')
if pat.search(license_statement):
lic = pat.search(license_statement).group(1)
if lic.startswith('CNRI'):
acronym_licence = lic[:4]
else:
acronym_licence = lic.replace(' ', '')
else:
acronym_licence = ''.join(
[w[0]
for w in license_statement.split(self.prefix_lic)[1].split()])
return acronym_licence | [
"def",
"_acronym_lic",
"(",
"self",
",",
"license_statement",
")",
":",
"pat",
"=",
"re",
".",
"compile",
"(",
"r'\\(([\\w+\\W?\\s?]+)\\)'",
")",
"if",
"pat",
".",
"search",
"(",
"license_statement",
")",
":",
"lic",
"=",
"pat",
".",
"search",
"(",
"license_statement",
")",
".",
"group",
"(",
"1",
")",
"if",
"lic",
".",
"startswith",
"(",
"'CNRI'",
")",
":",
"acronym_licence",
"=",
"lic",
"[",
":",
"4",
"]",
"else",
":",
"acronym_licence",
"=",
"lic",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"else",
":",
"acronym_licence",
"=",
"''",
".",
"join",
"(",
"[",
"w",
"[",
"0",
"]",
"for",
"w",
"in",
"license_statement",
".",
"split",
"(",
"self",
".",
"prefix_lic",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"]",
")",
"return",
"acronym_licence"
] | Convert license acronym. | [
"Convert",
"license",
"acronym",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/classifiers.py#L54-L67 | train |
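Example — assuming prefix_lic is the trove prefix 'License :: OSI Approved :: ' (its value is not shown in this excerpt), _acronym_lic behaves roughly like:

# 'License :: OSI Approved :: MIT License'                           -> 'ML'
# 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)' -> 'GPLv3'
# a parenthesized name wins; otherwise the acronym is built from the first
# letter of each word after the prefix, and CNRI-prefixed names truncate to 'CNRI'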
christophertbrown/bioscripts | ctbBio/ncbi_download.py | calcMD5 | def calcMD5(path):
"""
calc MD5 based on path
"""
# check that file exists
if os.path.exists(path) is False:
yield False
else:
command = ['md5sum', path]
p = Popen(command, stdout = PIPE)
for line in p.communicate()[0].splitlines():
yield line.decode('ascii').strip().split()[0]
p.wait()
yield False | python | def calcMD5(path):
"""
calc MD5 based on path
"""
# check that file exists
if os.path.exists(path) is False:
yield False
else:
command = ['md5sum', path]
p = Popen(command, stdout = PIPE)
for line in p.communicate()[0].splitlines():
yield line.decode('ascii').strip().split()[0]
p.wait()
yield False | [
"def",
"calcMD5",
"(",
"path",
")",
":",
"# check that file exists",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
"is",
"False",
":",
"yield",
"False",
"else",
":",
"command",
"=",
"[",
"'md5sum'",
",",
"path",
"]",
"p",
"=",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"PIPE",
")",
"for",
"line",
"in",
"p",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"splitlines",
"(",
")",
":",
"yield",
"line",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"[",
"0",
"]",
"p",
".",
"wait",
"(",
")",
"yield",
"False"
] | calc MD5 based on path | [
"calc",
"MD5",
"based",
"on",
"path"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L18-L31 | train |
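Example — calcMD5 is a generator that yields the md5sum output line by line and finally False, so a single next() gives either the checksum or False for a missing file:

from ctbBio.ncbi_download import calcMD5

digest = next(calcMD5('genome.fna'))  # hypothetical file path
if digest is not False:
    print('md5:', digest)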
christophertbrown/bioscripts | ctbBio/ncbi_download.py | wget | def wget(ftp, f = False, exclude = False, name = False, md5 = False, tries = 10):
"""
download files with wget
"""
# file name
if f is False:
f = ftp.rsplit('/', 1)[-1]
# downloaded file if it does not already exist
# check md5s on server (optional)
t = 0
while md5check(f, ftp, md5, exclude) is not True:
t += 1
if name is not False:
print('# downloading:', name, f)
if exclude is False:
command = 'wget -q --random-wait %s' % (ftp)
else:
command = 'wget -q --random-wait -R %s %s' % (exclude, ftp)
p = Popen(command, shell = True)
p.communicate()
if t >= tries:
print('not downloaded:', name, f)
return [f, False]
return [f, True] | python | def wget(ftp, f = False, exclude = False, name = False, md5 = False, tries = 10):
"""
download files with wget
"""
# file name
if f is False:
f = ftp.rsplit('/', 1)[-1]
# download file if it does not already exist
# check md5s on server (optional)
t = 0
while md5check(f, ftp, md5, exclude) is not True:
t += 1
if name is not False:
print('# downloading:', name, f)
if exclude is False:
command = 'wget -q --random-wait %s' % (ftp)
else:
command = 'wget -q --random-wait -R %s %s' % (exclude, ftp)
p = Popen(command, shell = True)
p.communicate()
if t >= tries:
print('not downloaded:', name, f)
return [f, False]
return [f, True] | [
"def",
"wget",
"(",
"ftp",
",",
"f",
"=",
"False",
",",
"exclude",
"=",
"False",
",",
"name",
"=",
"False",
",",
"md5",
"=",
"False",
",",
"tries",
"=",
"10",
")",
":",
"# file name",
"if",
"f",
"is",
"False",
":",
"f",
"=",
"ftp",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
"# downloaded file if it does not already exist",
"# check md5s on server (optional)",
"t",
"=",
"0",
"while",
"md5check",
"(",
"f",
",",
"ftp",
",",
"md5",
",",
"exclude",
")",
"is",
"not",
"True",
":",
"t",
"+=",
"1",
"if",
"name",
"is",
"not",
"False",
":",
"print",
"(",
"'# downloading:'",
",",
"name",
",",
"f",
")",
"if",
"exclude",
"is",
"False",
":",
"command",
"=",
"'wget -q --random-wait %s'",
"%",
"(",
"ftp",
")",
"else",
":",
"command",
"=",
"'wget -q --random-wait -R %s %s'",
"%",
"(",
"exclude",
",",
"ftp",
")",
"p",
"=",
"Popen",
"(",
"command",
",",
"shell",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
")",
"if",
"t",
">=",
"tries",
":",
"print",
"(",
"'not downloaded:'",
",",
"name",
",",
"f",
")",
"return",
"[",
"f",
",",
"False",
"]",
"return",
"[",
"f",
",",
"True",
"]"
] | download files with wget | [
"download",
"files",
"with",
"wget"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L74-L97 | train |
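A hedged usage sketch for wget above. Note that it depends on md5check from the same module and on the wget binary; the URL below is illustrative only, not a real endpoint.

fname, ok = wget('ftp://ftp.example.org/genomes/README.txt',  # illustrative URL
                 name='readme', md5=False, tries=3)
if ok is False:
    print('could not fetch', fname)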
christophertbrown/bioscripts | ctbBio/ncbi_download.py | check | def check(line, queries):
"""
check that at least one of
queries is in line
"""
line = line.strip()
spLine = line.replace('.', ' ').split()
matches = set(spLine).intersection(queries)
if len(matches) > 0:
return matches, line.split('\t')
return matches, False | python | def check(line, queries):
"""
check that at least one of
queries is in line
"""
line = line.strip()
spLine = line.replace('.', ' ').split()
matches = set(spLine).intersection(queries)
if len(matches) > 0:
return matches, line.split('\t')
return matches, False | [
"def",
"check",
"(",
"line",
",",
"queries",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
"spLine",
"=",
"line",
".",
"replace",
"(",
"'.'",
",",
"' '",
")",
".",
"split",
"(",
")",
"matches",
"=",
"set",
"(",
"spLine",
")",
".",
"intersection",
"(",
"queries",
")",
"if",
"len",
"(",
"matches",
")",
">",
"0",
":",
"return",
"matches",
",",
"line",
".",
"split",
"(",
"'\\t'",
")",
"return",
"matches",
",",
"False"
] | check that at least one of
queries is in line | [
"check",
"that",
"at",
"least",
"one",
"of",
"queries",
"is",
"in",
"list",
"l"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L99-L109 | train |
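A small sketch of check in action on one fabricated tab-separated line; the queries are accessions with their version suffixes stripped, which is why the function splits on '.' before intersecting.

queries = {'GCA_000005845', 'GCA_000009605'}       # hypothetical accessions
line = 'GCA_000005845.2\tlatest\tComplete Genome'  # fabricated summary line
matches, fields = check(line, queries)
if fields is not False:
    print('matched:', matches, 'fields:', fields)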
christophertbrown/bioscripts | ctbBio/ncbi_download.py | entrez | def entrez(db, acc):
"""
search entrez using specified database
and accession
"""
c1 = ['esearch', '-db', db, '-query', acc]
c2 = ['efetch', '-db', 'BioSample', '-format', 'docsum']
p1 = Popen(c1, stdout = PIPE, stderr = PIPE)
p2 = Popen(c2, stdin = p1.stdout, stdout = PIPE, stderr = PIPE)
return p2.communicate() | python | def entrez(db, acc):
"""
search entrez using specified database
and accession
"""
c1 = ['esearch', '-db', db, '-query', acc]
c2 = ['efetch', '-db', 'BioSample', '-format', 'docsum']
p1 = Popen(c1, stdout = PIPE, stderr = PIPE)
p2 = Popen(c2, stdin = p1.stdout, stdout = PIPE, stderr = PIPE)
return p2.communicate() | [
"def",
"entrez",
"(",
"db",
",",
"acc",
")",
":",
"c1",
"=",
"[",
"'esearch'",
",",
"'-db'",
",",
"db",
",",
"'-query'",
",",
"acc",
"]",
"c2",
"=",
"[",
"'efetch'",
",",
"'-db'",
",",
"'BioSample'",
",",
"'-format'",
",",
"'docsum'",
"]",
"p1",
"=",
"Popen",
"(",
"c1",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"p2",
"=",
"Popen",
"(",
"c2",
",",
"stdin",
"=",
"p1",
".",
"stdout",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
"return",
"p2",
".",
"communicate",
"(",
")"
] | search entrez using specified database
and accession | [
"search",
"entrez",
"using",
"specified",
"database",
"and",
"accession"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L111-L120 | train |
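entrez shells out to NCBI's Entrez Direct tools, so esearch and efetch must be installed and on PATH; note that the efetch stage is pinned to the BioSample docsum format regardless of the db argument. A sketch with a hypothetical accession:

out, err = entrez('assembly', 'GCA_000005845')  # hypothetical accession
for line in out.splitlines():
    print(line.decode('ascii').strip())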
christophertbrown/bioscripts | ctbBio/ncbi_download.py | searchAccession | def searchAccession(acc):
"""
attempt to use NCBI Entrez to get
BioSample ID
"""
# try genbank file
# genome database
out, error = entrez('genome', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# nucleotide database
out, error = entrez('nucleotide', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# assembly database
out, error = entrez('assembly', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
for error in error.splitlines():
error = error.decode('ascii').strip()
if '500 Can' in error:
return (False, acc, 'no network')
return (False, acc, 'efetch failed') | python | def searchAccession(acc):
"""
attempt to use NCBI Entrez to get
BioSample ID
"""
# try genbank file
# genome database
out, error = entrez('genome', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# nucleotide database
out, error = entrez('nucleotide', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
# assembly database
out, error = entrez('assembly', acc)
for line in out.splitlines():
line = line.decode('ascii').strip()
if 'Assembly_Accession' in line or 'BioSample' in line:
newAcc = line.split('>')[1].split('<')[0].split('.')[0].split(',')[0]
if len(newAcc) > 0:
return (True, acc, newAcc)
for error in error.splitlines():
error = error.decode('ascii').strip()
if '500 Can' in error:
return (False, acc, 'no network')
return (False, acc, 'efetch failed') | [
"def",
"searchAccession",
"(",
"acc",
")",
":",
"# try genbank file",
"# genome database",
"out",
",",
"error",
"=",
"entrez",
"(",
"'genome'",
",",
"acc",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
"if",
"'Assembly_Accession'",
"in",
"line",
"or",
"'BioSample'",
"in",
"line",
":",
"newAcc",
"=",
"line",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'<'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"newAcc",
")",
">",
"0",
":",
"return",
"(",
"True",
",",
"acc",
",",
"newAcc",
")",
"# nucleotide database",
"out",
",",
"error",
"=",
"entrez",
"(",
"'nucleotide'",
",",
"acc",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
"if",
"'Assembly_Accession'",
"in",
"line",
"or",
"'BioSample'",
"in",
"line",
":",
"newAcc",
"=",
"line",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'<'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"newAcc",
")",
">",
"0",
":",
"return",
"(",
"True",
",",
"acc",
",",
"newAcc",
")",
"# assembly database",
"out",
",",
"error",
"=",
"entrez",
"(",
"'assembly'",
",",
"acc",
")",
"for",
"line",
"in",
"out",
".",
"splitlines",
"(",
")",
":",
"line",
"=",
"line",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
"if",
"'Assembly_Accession'",
"in",
"line",
"or",
"'BioSample'",
"in",
"line",
":",
"newAcc",
"=",
"line",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
"'<'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"split",
"(",
"','",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"newAcc",
")",
">",
"0",
":",
"return",
"(",
"True",
",",
"acc",
",",
"newAcc",
")",
"for",
"error",
"in",
"error",
".",
"splitlines",
"(",
")",
":",
"error",
"=",
"error",
".",
"decode",
"(",
"'ascii'",
")",
".",
"strip",
"(",
")",
"if",
"'500 Can'",
"in",
"error",
":",
"return",
"(",
"False",
",",
"acc",
",",
"'no network'",
")",
"return",
"(",
"False",
",",
"acc",
",",
"'efetch failed'",
")"
] | attempt to use NCBI Entrez to get
BioSample ID | [
"attempt",
"to",
"use",
"NCBI",
"Entrez",
"to",
"get",
"BioSample",
"ID"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L122-L156 | train |
christophertbrown/bioscripts | ctbBio/ncbi_download.py | getFTPs | def getFTPs(accessions, ftp, search, exclude, convert = False, threads = 1, attempt = 1,
max_attempts = 2):
"""
download genome info from NCBI
"""
info = wget(ftp)[0]
allMatches = []
for genome in open(info, encoding = 'utf8'):
genome = str(genome)
matches, genomeInfo = check(genome, accessions)
if genomeInfo is not False:
f = genomeInfo[0] + search
Gftp = genomeInfo[19]
Gftp = Gftp + '/' + search
allMatches.extend(matches)
yield (Gftp, f, exclude, matches)
# print accessions that could not be matched
# and whether or not they could be converted (optional)
newAccs = []
missing = accessions.difference(set(allMatches))
if convert is True:
pool = Pool(threads)
pool = pool.imap_unordered(searchAccession, missing)
for newAcc in tqdm(pool, total = len(missing)):
status, accession, newAcc = newAcc
if status is True:
newAccs.append(newAcc)
print('not found:', accession, '->', newAcc)
else:
for accession in missing:
print('not found:', accession)
# re-try after converting accessions (optional)
if len(newAccs) > 0 and attempt <= max_attempts:
print('convert accession attempt', attempt)
attempt += 1
for hit in getFTPs(set(newAccs), ftp, search, exclude, convert,
threads = 1, attempt = attempt):
yield hit | python | def getFTPs(accessions, ftp, search, exclude, convert = False, threads = 1, attempt = 1,
max_attempts = 2):
"""
download genome info from NCBI
"""
info = wget(ftp)[0]
allMatches = []
for genome in open(info, encoding = 'utf8'):
genome = str(genome)
matches, genomeInfo = check(genome, accessions)
if genomeInfo is not False:
f = genomeInfo[0] + search
Gftp = genomeInfo[19]
Gftp = Gftp + '/' + search
allMatches.extend(matches)
yield (Gftp, f, exclude, matches)
# print accessions that could not be matched
# and whether or not they could be converted (optional)
newAccs = []
missing = accessions.difference(set(allMatches))
if convert is True:
pool = Pool(threads)
pool = pool.imap_unordered(searchAccession, missing)
for newAcc in tqdm(pool, total = len(missing)):
status, accession, newAcc = newAcc
if status is True:
newAccs.append(newAcc)
print('not found:', accession, '->', newAcc)
else:
for accession in missing:
print('not found:', accession)
# re-try after converting accessions (optional)
if len(newAccs) > 0 and attempt <= max_attempts:
print('convert accession attempt', attempt)
attempt += 1
for hit in getFTPs(set(newAccs), ftp, search, exclude, convert,
threads = 1, attempt = attempt):
yield hit | [
"def",
"getFTPs",
"(",
"accessions",
",",
"ftp",
",",
"search",
",",
"exclude",
",",
"convert",
"=",
"False",
",",
"threads",
"=",
"1",
",",
"attempt",
"=",
"1",
",",
"max_attempts",
"=",
"2",
")",
":",
"info",
"=",
"wget",
"(",
"ftp",
")",
"[",
"0",
"]",
"allMatches",
"=",
"[",
"]",
"for",
"genome",
"in",
"open",
"(",
"info",
",",
"encoding",
"=",
"'utf8'",
")",
":",
"genome",
"=",
"str",
"(",
"genome",
")",
"matches",
",",
"genomeInfo",
"=",
"check",
"(",
"genome",
",",
"accessions",
")",
"if",
"genomeInfo",
"is",
"not",
"False",
":",
"f",
"=",
"genomeInfo",
"[",
"0",
"]",
"+",
"search",
"Gftp",
"=",
"genomeInfo",
"[",
"19",
"]",
"Gftp",
"=",
"Gftp",
"+",
"'/'",
"+",
"search",
"allMatches",
".",
"extend",
"(",
"matches",
")",
"yield",
"(",
"Gftp",
",",
"f",
",",
"exclude",
",",
"matches",
")",
"# print accessions that could not be matched",
"# and whether or not they could be converted (optional)",
"newAccs",
"=",
"[",
"]",
"missing",
"=",
"accessions",
".",
"difference",
"(",
"set",
"(",
"allMatches",
")",
")",
"if",
"convert",
"is",
"True",
":",
"pool",
"=",
"Pool",
"(",
"threads",
")",
"pool",
"=",
"pool",
".",
"imap_unordered",
"(",
"searchAccession",
",",
"missing",
")",
"for",
"newAcc",
"in",
"tqdm",
"(",
"pool",
",",
"total",
"=",
"len",
"(",
"missing",
")",
")",
":",
"status",
",",
"accession",
",",
"newAcc",
"=",
"newAcc",
"if",
"status",
"is",
"True",
":",
"newAccs",
".",
"append",
"(",
"newAcc",
")",
"print",
"(",
"'not found:'",
",",
"accession",
",",
"'->'",
",",
"newAcc",
")",
"else",
":",
"for",
"accession",
"in",
"missing",
":",
"print",
"(",
"'not found:'",
",",
"accession",
")",
"# re-try after converting accessions (optional)",
"if",
"len",
"(",
"newAccs",
")",
">",
"0",
"and",
"attempt",
"<=",
"max_attempts",
":",
"print",
"(",
"'convert accession attempt'",
",",
"attempt",
")",
"attempt",
"+=",
"1",
"for",
"hit",
"in",
"getFTPs",
"(",
"set",
"(",
"newAccs",
")",
",",
"ftp",
",",
"search",
",",
"exclude",
",",
"convert",
",",
"threads",
"=",
"1",
",",
"attempt",
"=",
"attempt",
")",
":",
"yield",
"hit"
] | download genome info from NCBI | [
"download",
"genome",
"info",
"from",
"NCBI"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L158-L195 | train |
christophertbrown/bioscripts | ctbBio/ncbi_download.py | download | def download(args):
"""
download genomes from NCBI
"""
accessions, infoFTP = set(args['g']), args['i']
search, exclude = args['s'], args['e']
FTPs = getFTPs(accessions, infoFTP, search, exclude, threads = args['t'],
convert = args['convert'])
if args['test'] is True:
for genome in FTPs:
print('found:', ';'.join(genome[-1]), genome[0])
return FTPs
pool = Pool(args['t'])
pool = pool.imap_unordered(wgetGenome, FTPs)
files = []
for f in tqdm(pool, total = len(accessions)):
files.append(f)
return files | python | def download(args):
"""
download genomes from NCBI
"""
accessions, infoFTP = set(args['g']), args['i']
search, exclude = args['s'], args['e']
FTPs = getFTPs(accessions, infoFTP, search, exclude, threads = args['t'],
convert = args['convert'])
if args['test'] is True:
for genome in FTPs:
print('found:', ';'.join(genome[-1]), genome[0])
return FTPs
pool = Pool(args['t'])
pool = pool.imap_unordered(wgetGenome, FTPs)
files = []
for f in tqdm(pool, total = len(accessions)):
files.append(f)
return files | [
"def",
"download",
"(",
"args",
")",
":",
"accessions",
",",
"infoFTP",
"=",
"set",
"(",
"args",
"[",
"'g'",
"]",
")",
",",
"args",
"[",
"'i'",
"]",
"search",
",",
"exclude",
"=",
"args",
"[",
"'s'",
"]",
",",
"args",
"[",
"'e'",
"]",
"FTPs",
"=",
"getFTPs",
"(",
"accessions",
",",
"infoFTP",
",",
"search",
",",
"exclude",
",",
"threads",
"=",
"args",
"[",
"'t'",
"]",
",",
"convert",
"=",
"args",
"[",
"'convert'",
"]",
")",
"if",
"args",
"[",
"'test'",
"]",
"is",
"True",
":",
"for",
"genome",
"in",
"FTPs",
":",
"print",
"(",
"'found:'",
",",
"';'",
".",
"join",
"(",
"genome",
"[",
"-",
"1",
"]",
")",
",",
"genome",
"[",
"0",
"]",
")",
"return",
"FTPs",
"pool",
"=",
"Pool",
"(",
"args",
"[",
"'t'",
"]",
")",
"pool",
"=",
"pool",
".",
"imap_unordered",
"(",
"wgetGenome",
",",
"FTPs",
")",
"files",
"=",
"[",
"]",
"for",
"f",
"in",
"tqdm",
"(",
"pool",
",",
"total",
"=",
"len",
"(",
"accessions",
")",
")",
":",
"files",
".",
"append",
"(",
"f",
")",
"return",
"files"
] | download genomes from NCBI | [
"download",
"genomes",
"from",
"NCBI"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/ncbi_download.py#L204-L221 | train |
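A sketch of the args dictionary that download expects; the keys mirror the script's command-line flags. The listing URL, accession, and patterns below are assumptions for illustration, and test=True makes it a dry run that only reports matches.

args = {
    'g': ['GCA_000005845'],                             # accessions (hypothetical)
    'i': 'ftp://ftp.example.org/assembly_summary.txt',  # assumed listing URL
    's': '*.fna.gz',                                    # files to fetch
    'e': 'cds_from_genomic',                            # wget reject pattern
    't': 4,                                             # threads
    'convert': False,
    'test': True,                                       # dry run
}
download(args)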
christophertbrown/bioscripts | ctbBio/fix_fasta.py | fix_fasta | def fix_fasta(fasta):
"""
remove pesky characters from fasta file header
"""
for seq in parse_fasta(fasta):
seq[0] = remove_char(seq[0])
if len(seq[1]) > 0:
yield seq | python | def fix_fasta(fasta):
"""
remove pesky characters from fasta file header
"""
for seq in parse_fasta(fasta):
seq[0] = remove_char(seq[0])
if len(seq[1]) > 0:
yield seq | [
"def",
"fix_fasta",
"(",
"fasta",
")",
":",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fasta",
")",
":",
"seq",
"[",
"0",
"]",
"=",
"remove_char",
"(",
"seq",
"[",
"0",
"]",
")",
"if",
"len",
"(",
"seq",
"[",
"1",
"]",
")",
">",
"0",
":",
"yield",
"seq"
] | remove pesky characters from fasta file header | [
"remove",
"pesky",
"characters",
"from",
"fasta",
"file",
"header"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fix_fasta.py#L18-L25 | train |
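fix_fasta relies on parse_fasta and remove_char from the same module; a sketch that cleans a hypothetical file and prints the result, where each yielded seq is a [header, sequence] pair:

with open('contigs.fa') as handle:  # hypothetical file
    for seq in fix_fasta(handle):
        print('\n'.join(seq))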
ssanderson/pstats-view | pstatsviewer/viewer.py | _calc_frames | def _calc_frames(stats):
"""
Compute a DataFrame summary of a Stats object.
"""
timings = []
callers = []
for key, values in iteritems(stats.stats):
timings.append(
pd.Series(
key + values[:-1],
index=timing_colnames,
)
)
for caller_key, caller_values in iteritems(values[-1]):
callers.append(
pd.Series(
key + caller_key + caller_values,
index=caller_columns,
)
)
timings_df = pd.DataFrame(timings)
callers_df = pd.DataFrame(callers)
timings_df['filename:funcname'] = \
(timings_df['filename'] + ':' + timings_df['funcname'])
timings_df = timings_df.groupby('filename:funcname').sum()
return timings_df, callers_df | python | def _calc_frames(stats):
"""
Compute a DataFrame summary of a Stats object.
"""
timings = []
callers = []
for key, values in iteritems(stats.stats):
timings.append(
pd.Series(
key + values[:-1],
index=timing_colnames,
)
)
for caller_key, caller_values in iteritems(values[-1]):
callers.append(
pd.Series(
key + caller_key + caller_values,
index=caller_columns,
)
)
timings_df = pd.DataFrame(timings)
callers_df = pd.DataFrame(callers)
timings_df['filename:funcname'] = \
(timings_df['filename'] + ':' + timings_df['funcname'])
timings_df = timings_df.groupby('filename:funcname').sum()
return timings_df, callers_df | [
"def",
"_calc_frames",
"(",
"stats",
")",
":",
"timings",
"=",
"[",
"]",
"callers",
"=",
"[",
"]",
"for",
"key",
",",
"values",
"in",
"iteritems",
"(",
"stats",
".",
"stats",
")",
":",
"timings",
".",
"append",
"(",
"pd",
".",
"Series",
"(",
"key",
"+",
"values",
"[",
":",
"-",
"1",
"]",
",",
"index",
"=",
"timing_colnames",
",",
")",
")",
"for",
"caller_key",
",",
"caller_values",
"in",
"iteritems",
"(",
"values",
"[",
"-",
"1",
"]",
")",
":",
"callers",
".",
"append",
"(",
"pd",
".",
"Series",
"(",
"key",
"+",
"caller_key",
"+",
"caller_values",
",",
"index",
"=",
"caller_columns",
",",
")",
")",
"timings_df",
"=",
"pd",
".",
"DataFrame",
"(",
"timings",
")",
"callers_df",
"=",
"pd",
".",
"DataFrame",
"(",
"callers",
")",
"timings_df",
"[",
"'filename:funcname'",
"]",
"=",
"(",
"timings_df",
"[",
"'filename'",
"]",
"+",
"':'",
"+",
"timings_df",
"[",
"'funcname'",
"]",
")",
"timings_df",
"=",
"timings_df",
".",
"groupby",
"(",
"'filename:funcname'",
")",
".",
"sum",
"(",
")",
"return",
"timings_df",
",",
"callers_df"
] | Compute a DataFrame summary of a Stats object. | [
"Compute",
"a",
"DataFrame",
"summary",
"of",
"a",
"Stats",
"object",
"."
] | 62148d4e01765806bc5e6bb40628cdb186482c05 | https://github.com/ssanderson/pstats-view/blob/62148d4e01765806bc5e6bb40628cdb186482c05/pstatsviewer/viewer.py#L40-L66 | train |
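_calc_frames consumes a pstats.Stats object; a sketch that profiles a toy workload and prints the grouped timings (the exact columns come from timing_colnames, defined elsewhere in the module):

import cProfile
import pstats

pr = cProfile.Profile()
pr.enable()
sum(i * i for i in range(100000))  # toy workload to profile
pr.disable()
timings, callers = _calc_frames(pstats.Stats(pr))
print(timings.head())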
christophertbrown/bioscripts | ctbBio/unmapped.py | unmapped | def unmapped(sam, mates):
"""
get unmapped reads
"""
for read in sam:
if read.startswith('@') is True:
continue
read = read.strip().split()
if read[2] == '*' and read[6] == '*':
yield read
elif mates is True:
if read[2] == '*' or read[6] == '*':
yield read
for i in read:
if i == 'YT:Z:UP':
yield read | python | def unmapped(sam, mates):
"""
get unmapped reads
"""
for read in sam:
if read.startswith('@') is True:
continue
read = read.strip().split()
if read[2] == '*' and read[6] == '*':
yield read
elif mates is True:
if read[2] == '*' or read[6] == '*':
yield read
for i in read:
if i == 'YT:Z:UP':
yield read | [
"def",
"unmapped",
"(",
"sam",
",",
"mates",
")",
":",
"for",
"read",
"in",
"sam",
":",
"if",
"read",
".",
"startswith",
"(",
"'@'",
")",
"is",
"True",
":",
"continue",
"read",
"=",
"read",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"read",
"[",
"2",
"]",
"==",
"'*'",
"and",
"read",
"[",
"6",
"]",
"==",
"'*'",
":",
"yield",
"read",
"elif",
"mates",
"is",
"True",
":",
"if",
"read",
"[",
"2",
"]",
"==",
"'*'",
"or",
"read",
"[",
"6",
"]",
"==",
"'*'",
":",
"yield",
"read",
"for",
"i",
"in",
"read",
":",
"if",
"i",
"==",
"'YT:Z:UP'",
":",
"yield",
"read"
] | get unmapped reads | [
"get",
"unmapped",
"reads"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/unmapped.py#L11-L26 | train |
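A sketch that streams unmapped reads out of a SAM file; with mates=True, reads whose mate is unmapped, or that carry bowtie2's YT:Z:UP tag, are yielded as well. The file name is hypothetical.

with open('alignment.sam') as sam:  # hypothetical file
    for read in unmapped(sam, mates=True):
        print('\t'.join(read))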
christophertbrown/bioscripts | ctbBio/parallel.py | parallel | def parallel(processes, threads):
"""
execute jobs in processes using N threads
"""
pool = multithread(threads)
pool.map(run_process, processes)
pool.close()
pool.join() | python | def parallel(processes, threads):
"""
execute jobs in processes using N threads
"""
pool = multithread(threads)
pool.map(run_process, processes)
pool.close()
pool.join() | [
"def",
"parallel",
"(",
"processes",
",",
"threads",
")",
":",
"pool",
"=",
"multithread",
"(",
"threads",
")",
"pool",
".",
"map",
"(",
"run_process",
",",
"processes",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")"
] | execute jobs in processes using N threads | [
"execute",
"jobs",
"in",
"processes",
"using",
"N",
"threads"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/parallel.py#L19-L26 | train |
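parallel assumes that run_process (defined in the same module) executes one job; a sketch dispatching three shell commands over two worker threads:

jobs = ['sleep 1', 'sleep 1', 'sleep 1']  # any jobs run_process accepts
parallel(jobs, 2)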
deep-compute/basescript | basescript/log.py | define_log_renderer | def define_log_renderer(fmt, fpath, quiet):
"""
the final log processor that structlog requires to render.
"""
# it must accept a logger, method_name and event_dict (just like processors)
# but must return the rendered string, not a dictionary.
# TODO tty logic
if fmt:
return structlog.processors.JSONRenderer()
if fpath is not None:
return structlog.processors.JSONRenderer()
if sys.stderr.isatty() and not quiet:
return structlog.dev.ConsoleRenderer()
return structlog.processors.JSONRenderer() | python | def define_log_renderer(fmt, fpath, quiet):
"""
the final log processor that structlog requires to render.
"""
# it must accept a logger, method_name and event_dict (just like processors)
# but must return the rendered string, not a dictionary.
# TODO tty logic
if fmt:
return structlog.processors.JSONRenderer()
if fpath is not None:
return structlog.processors.JSONRenderer()
if sys.stderr.isatty() and not quiet:
return structlog.dev.ConsoleRenderer()
return structlog.processors.JSONRenderer() | [
"def",
"define_log_renderer",
"(",
"fmt",
",",
"fpath",
",",
"quiet",
")",
":",
"# it must accept a logger, method_name and event_dict (just like processors)",
"# but must return the rendered string, not a dictionary.",
"# TODO tty logic",
"if",
"fmt",
":",
"return",
"structlog",
".",
"processors",
".",
"JSONRenderer",
"(",
")",
"if",
"fpath",
"is",
"not",
"None",
":",
"return",
"structlog",
".",
"processors",
".",
"JSONRenderer",
"(",
")",
"if",
"sys",
".",
"stderr",
".",
"isatty",
"(",
")",
"and",
"not",
"quiet",
":",
"return",
"structlog",
".",
"dev",
".",
"ConsoleRenderer",
"(",
")",
"return",
"structlog",
".",
"processors",
".",
"JSONRenderer",
"(",
")"
] | the final log processor that structlog requires to render. | [
"the",
"final",
"log",
"processor",
"that",
"structlog",
"requires",
"to",
"render",
"."
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L239-L256 | train |
deep-compute/basescript | basescript/log.py | _structlog_default_keys_processor | def _structlog_default_keys_processor(logger_class, log_method, event):
''' Add unique id, type and hostname '''
global HOSTNAME
if 'id' not in event:
event['id'] = '%s_%s' % (
datetime.utcnow().strftime('%Y%m%dT%H%M%S'),
uuid.uuid1().hex
)
if 'type' not in event:
event['type'] = 'log'
event['host'] = HOSTNAME
return event | python | def _structlog_default_keys_processor(logger_class, log_method, event):
''' Add unique id, type and hostname '''
global HOSTNAME
if 'id' not in event:
event['id'] = '%s_%s' % (
datetime.utcnow().strftime('%Y%m%dT%H%M%S'),
uuid.uuid1().hex
)
if 'type' not in event:
event['type'] = 'log'
event['host'] = HOSTNAME
return event | [
"def",
"_structlog_default_keys_processor",
"(",
"logger_class",
",",
"log_method",
",",
"event",
")",
":",
"global",
"HOSTNAME",
"if",
"'id'",
"not",
"in",
"event",
":",
"event",
"[",
"'id'",
"]",
"=",
"'%s_%s'",
"%",
"(",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"'%Y%m%dT%H%M%S'",
")",
",",
"uuid",
".",
"uuid1",
"(",
")",
".",
"hex",
")",
"if",
"'type'",
"not",
"in",
"event",
":",
"event",
"[",
"'type'",
"]",
"=",
"'log'",
"event",
"[",
"'host'",
"]",
"=",
"HOSTNAME",
"return",
"event"
] | Add unique id, type and hostname | [
"Add",
"unique",
"id",
"type",
"and",
"hostname"
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L258-L273 | train |
deep-compute/basescript | basescript/log.py | define_log_processors | def define_log_processors():
"""
log processors that structlog executes before final rendering
"""
# these processors should accept logger, method_name and event_dict
# and return a new dictionary which will be passed as event_dict to the next one.
return [
structlog.processors.TimeStamper(fmt="iso"),
_structlog_default_keys_processor,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
] | python | def define_log_processors():
"""
log processors that structlog executes before final rendering
"""
# these processors should accept logger, method_name and event_dict
# and return a new dictionary which will be passed as event_dict to the next one.
return [
structlog.processors.TimeStamper(fmt="iso"),
_structlog_default_keys_processor,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
] | [
"def",
"define_log_processors",
"(",
")",
":",
"# these processors should accept logger, method_name and event_dict",
"# and return a new dictionary which will be passed as event_dict to the next one.",
"return",
"[",
"structlog",
".",
"processors",
".",
"TimeStamper",
"(",
"fmt",
"=",
"\"iso\"",
")",
",",
"_structlog_default_keys_processor",
",",
"structlog",
".",
"stdlib",
".",
"PositionalArgumentsFormatter",
"(",
")",
",",
"structlog",
".",
"processors",
".",
"StackInfoRenderer",
"(",
")",
",",
"structlog",
".",
"processors",
".",
"format_exc_info",
",",
"]"
] | log processors that structlog executes before final rendering | [
"log",
"processors",
"that",
"structlog",
"executes",
"before",
"final",
"rendering"
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L352-L364 | train |
deep-compute/basescript | basescript/log.py | _configure_logger | def _configure_logger(fmt, quiet, level, fpath,
pre_hooks, post_hooks, metric_grouping_interval):
"""
configures a logger when required to write to stderr or a file
"""
# NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently.
level = getattr(logging, level.upper())
global _GLOBAL_LOG_CONFIGURED
if _GLOBAL_LOG_CONFIGURED:
return
# since the hooks need to run through structlog, need to wrap them like processors
def wrap_hook(fn):
@wraps(fn)
def processor(logger, method_name, event_dict):
fn(event_dict)
return event_dict
return processor
processors = define_log_processors()
processors.extend(
[ wrap_hook(h) for h in pre_hooks ]
)
if metric_grouping_interval:
processors.append(metrics_grouping_processor)
log_renderer = define_log_renderer(fmt, fpath, quiet)
stderr_required = (not quiet)
pretty_to_stderr = (
stderr_required
and (
fmt == "pretty"
or (fmt is None and sys.stderr.isatty())
)
)
should_inject_pretty_renderer = (
pretty_to_stderr
and not isinstance(log_renderer, structlog.dev.ConsoleRenderer)
)
if should_inject_pretty_renderer:
stderr_required = False
processors.append(StderrConsoleRenderer())
processors.append(log_renderer)
processors.extend(
[ wrap_hook(h) for h in post_hooks ]
)
streams = []
# we need to use a stream if we are writing to both file and stderr, and both are json
if stderr_required:
streams.append(sys.stderr)
if fpath is not None:
# TODO handle creating a directory for this log file ?
# TODO set mode and encoding appropriately
streams.append(open(fpath, 'a'))
assert len(streams) != 0, "cannot configure logger for 0 streams"
stream = streams[0] if len(streams) == 1 else Stream(*streams)
atexit.register(stream.close)
# a global level struct log config unless otherwise specified.
structlog.configure(
processors=processors,
context_class=dict,
logger_factory=LevelLoggerFactory(stream, level=level),
wrapper_class=BoundLevelLogger,
cache_logger_on_first_use=True,
)
# TODO take care of removing other handlers
stdlib_root_log = logging.getLogger()
stdlib_root_log.addHandler(StdlibStructlogHandler())
stdlib_root_log.setLevel(level)
_GLOBAL_LOG_CONFIGURED = True | python | def _configure_logger(fmt, quiet, level, fpath,
pre_hooks, post_hooks, metric_grouping_interval):
"""
configures a logger when required to write to stderr or a file
"""
# NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently.
level = getattr(logging, level.upper())
global _GLOBAL_LOG_CONFIGURED
if _GLOBAL_LOG_CONFIGURED:
return
# since the hooks need to run through structlog, need to wrap them like processors
def wrap_hook(fn):
@wraps(fn)
def processor(logger, method_name, event_dict):
fn(event_dict)
return event_dict
return processor
processors = define_log_processors()
processors.extend(
[ wrap_hook(h) for h in pre_hooks ]
)
if metric_grouping_interval:
processors.append(metrics_grouping_processor)
log_renderer = define_log_renderer(fmt, fpath, quiet)
stderr_required = (not quiet)
pretty_to_stderr = (
stderr_required
and (
fmt == "pretty"
or (fmt is None and sys.stderr.isatty())
)
)
should_inject_pretty_renderer = (
pretty_to_stderr
and not isinstance(log_renderer, structlog.dev.ConsoleRenderer)
)
if should_inject_pretty_renderer:
stderr_required = False
processors.append(StderrConsoleRenderer())
processors.append(log_renderer)
processors.extend(
[ wrap_hook(h) for h in post_hooks ]
)
streams = []
# we need to use a stream if we are writing to both file and stderr, and both are json
if stderr_required:
streams.append(sys.stderr)
if fpath is not None:
# TODO handle creating a directory for this log file ?
# TODO set mode and encoding appropriately
streams.append(open(fpath, 'a'))
assert len(streams) != 0, "cannot configure logger for 0 streams"
stream = streams[0] if len(streams) == 1 else Stream(*streams)
atexit.register(stream.close)
# a global level struct log config unless otherwise specified.
structlog.configure(
processors=processors,
context_class=dict,
logger_factory=LevelLoggerFactory(stream, level=level),
wrapper_class=BoundLevelLogger,
cache_logger_on_first_use=True,
)
# TODO take care of removing other handlers
stdlib_root_log = logging.getLogger()
stdlib_root_log.addHandler(StdlibStructlogHandler())
stdlib_root_log.setLevel(level)
_GLOBAL_LOG_CONFIGURED = True | [
"def",
"_configure_logger",
"(",
"fmt",
",",
"quiet",
",",
"level",
",",
"fpath",
",",
"pre_hooks",
",",
"post_hooks",
",",
"metric_grouping_interval",
")",
":",
"# NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently.",
"level",
"=",
"getattr",
"(",
"logging",
",",
"level",
".",
"upper",
"(",
")",
")",
"global",
"_GLOBAL_LOG_CONFIGURED",
"if",
"_GLOBAL_LOG_CONFIGURED",
":",
"return",
"# since the hooks need to run through structlog, need to wrap them like processors",
"def",
"wrap_hook",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"processor",
"(",
"logger",
",",
"method_name",
",",
"event_dict",
")",
":",
"fn",
"(",
"event_dict",
")",
"return",
"event_dict",
"return",
"processor",
"processors",
"=",
"define_log_processors",
"(",
")",
"processors",
".",
"extend",
"(",
"[",
"wrap_hook",
"(",
"h",
")",
"for",
"h",
"in",
"pre_hooks",
"]",
")",
"if",
"metric_grouping_interval",
":",
"processors",
".",
"append",
"(",
"metrics_grouping_processor",
")",
"log_renderer",
"=",
"define_log_renderer",
"(",
"fmt",
",",
"fpath",
",",
"quiet",
")",
"stderr_required",
"=",
"(",
"not",
"quiet",
")",
"pretty_to_stderr",
"=",
"(",
"stderr_required",
"and",
"(",
"fmt",
"==",
"\"pretty\"",
"or",
"(",
"fmt",
"is",
"None",
"and",
"sys",
".",
"stderr",
".",
"isatty",
"(",
")",
")",
")",
")",
"should_inject_pretty_renderer",
"=",
"(",
"pretty_to_stderr",
"and",
"not",
"isinstance",
"(",
"log_renderer",
",",
"structlog",
".",
"dev",
".",
"ConsoleRenderer",
")",
")",
"if",
"should_inject_pretty_renderer",
":",
"stderr_required",
"=",
"False",
"processors",
".",
"append",
"(",
"StderrConsoleRenderer",
"(",
")",
")",
"processors",
".",
"append",
"(",
"log_renderer",
")",
"processors",
".",
"extend",
"(",
"[",
"wrap_hook",
"(",
"h",
")",
"for",
"h",
"in",
"post_hooks",
"]",
")",
"streams",
"=",
"[",
"]",
"# we need to use a stream if we are writing to both file and stderr, and both are json",
"if",
"stderr_required",
":",
"streams",
".",
"append",
"(",
"sys",
".",
"stderr",
")",
"if",
"fpath",
"is",
"not",
"None",
":",
"# TODO handle creating a directory for this log file ?",
"# TODO set mode and encoding appropriately",
"streams",
".",
"append",
"(",
"open",
"(",
"fpath",
",",
"'a'",
")",
")",
"assert",
"len",
"(",
"streams",
")",
"!=",
"0",
",",
"\"cannot configure logger for 0 streams\"",
"stream",
"=",
"streams",
"[",
"0",
"]",
"if",
"len",
"(",
"streams",
")",
"==",
"1",
"else",
"Stream",
"(",
"*",
"streams",
")",
"atexit",
".",
"register",
"(",
"stream",
".",
"close",
")",
"# a global level struct log config unless otherwise specified.",
"structlog",
".",
"configure",
"(",
"processors",
"=",
"processors",
",",
"context_class",
"=",
"dict",
",",
"logger_factory",
"=",
"LevelLoggerFactory",
"(",
"stream",
",",
"level",
"=",
"level",
")",
",",
"wrapper_class",
"=",
"BoundLevelLogger",
",",
"cache_logger_on_first_use",
"=",
"True",
",",
")",
"# TODO take care of removing other handlers",
"stdlib_root_log",
"=",
"logging",
".",
"getLogger",
"(",
")",
"stdlib_root_log",
".",
"addHandler",
"(",
"StdlibStructlogHandler",
"(",
")",
")",
"stdlib_root_log",
".",
"setLevel",
"(",
"level",
")",
"_GLOBAL_LOG_CONFIGURED",
"=",
"True"
] | configures a logger when required to write to stderr or a file | [
"configures",
"a",
"logger",
"when",
"required",
"write",
"to",
"stderr",
"or",
"a",
"file"
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L366-L447 | train |
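A hedged sketch of wiring _configure_logger up and emitting one event; the argument values are plausible defaults chosen for illustration, not documented ones:

_configure_logger(fmt=None, quiet=False, level='info', fpath=None,
                  pre_hooks=[], post_hooks=[], metric_grouping_interval=0)
log = structlog.get_logger()
log.info('service_started', port=8080)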
deep-compute/basescript | basescript/log.py | BoundLevelLogger._add_base_info | def _add_base_info(self, event_dict):
"""
Instead of using a processor, add basic information like caller, filename etc
here.
"""
f = sys._getframe()
level_method_frame = f.f_back
caller_frame = level_method_frame.f_back
return event_dict | python | def _add_base_info(self, event_dict):
"""
Instead of using a processor, add basic information like caller, filename etc
here.
"""
f = sys._getframe()
level_method_frame = f.f_back
caller_frame = level_method_frame.f_back
return event_dict | [
"def",
"_add_base_info",
"(",
"self",
",",
"event_dict",
")",
":",
"f",
"=",
"sys",
".",
"_getframe",
"(",
")",
"level_method_frame",
"=",
"f",
".",
"f_back",
"caller_frame",
"=",
"level_method_frame",
".",
"f_back",
"return",
"event_dict"
] | Instead of using a processor, add basic information like caller, filename etc
here. | [
"Instead",
"of",
"using",
"a",
"processor",
"adding",
"basic",
"information",
"like",
"caller",
"filename",
"etc",
"here",
"."
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L121-L129 | train |
deep-compute/basescript | basescript/log.py | BoundLevelLogger._proxy_to_logger | def _proxy_to_logger(self, method_name, event, *event_args,
**event_kw):
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used.
"""
if isinstance(event, bytes):
event = event.decode('utf-8')
if event_args:
event_kw['positional_args'] = event_args
return super(BoundLevelLogger, self)._proxy_to_logger(method_name,
event=event,
**event_kw) | python | def _proxy_to_logger(self, method_name, event, *event_args,
**event_kw):
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used.
"""
if isinstance(event, bytes):
event = event.decode('utf-8')
if event_args:
event_kw['positional_args'] = event_args
return super(BoundLevelLogger, self)._proxy_to_logger(method_name,
event=event,
**event_kw) | [
"def",
"_proxy_to_logger",
"(",
"self",
",",
"method_name",
",",
"event",
",",
"*",
"event_args",
",",
"*",
"*",
"event_kw",
")",
":",
"if",
"isinstance",
"(",
"event",
",",
"bytes",
")",
":",
"event",
"=",
"event",
".",
"decode",
"(",
"'utf-8'",
")",
"if",
"event_args",
":",
"event_kw",
"[",
"'positional_args'",
"]",
"=",
"event_args",
"return",
"super",
"(",
"BoundLevelLogger",
",",
"self",
")",
".",
"_proxy_to_logger",
"(",
"method_name",
",",
"event",
"=",
"event",
",",
"*",
"*",
"event_kw",
")"
] | Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used. | [
"Propagate",
"a",
"method",
"call",
"to",
"the",
"wrapped",
"logger",
"."
] | f7233963c5291530fcb2444a7f45b556e6407b90 | https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L211-L229 | train |
smdabdoub/phylotoast | bin/core_overlap_plot.py | translate | def translate(rect, x, y, width=1):
"""
Given four points of a rectangle, translate the
rectangle to the specified x and y coordinates and,
optionally, change the width.
:type rect: list of tuples
:param rect: Four points describing a rectangle.
:type x: float
:param x: The amount to shift the rectangle along the x-axis.
:type y: float
:param y: The amount to shift the rectangle along the y-axis.
:type width: float
:param width: The amount by which to change the width of the
rectangle.
"""
return ((rect[0][0]+x, rect[0][1]+y), (rect[1][0]+x, rect[1][1]+y),
(rect[2][0]+x+width, rect[2][1]+y), (rect[3][0]+x+width, rect[3][1]+y)) | python | def translate(rect, x, y, width=1):
"""
Given four points of a rectangle, translate the
rectangle to the specified x and y coordinates and,
optionally, change the width.
:type rect: list of tuples
:param rect: Four points describing a rectangle.
:type x: float
:param x: The amount to shift the rectangle along the x-axis.
:type y: float
:param y: The amount to shift the rectangle along the y-axis.
:type width: float
:param width: The amount by which to change the width of the
rectangle.
"""
return ((rect[0][0]+x, rect[0][1]+y), (rect[1][0]+x, rect[1][1]+y),
(rect[2][0]+x+width, rect[2][1]+y), (rect[3][0]+x+width, rect[3][1]+y)) | [
"def",
"translate",
"(",
"rect",
",",
"x",
",",
"y",
",",
"width",
"=",
"1",
")",
":",
"return",
"(",
"(",
"rect",
"[",
"0",
"]",
"[",
"0",
"]",
"+",
"x",
",",
"rect",
"[",
"0",
"]",
"[",
"1",
"]",
"+",
"y",
")",
",",
"(",
"rect",
"[",
"1",
"]",
"[",
"0",
"]",
"+",
"x",
",",
"rect",
"[",
"1",
"]",
"[",
"1",
"]",
"+",
"y",
")",
",",
"(",
"rect",
"[",
"2",
"]",
"[",
"0",
"]",
"+",
"x",
"+",
"width",
",",
"rect",
"[",
"2",
"]",
"[",
"1",
"]",
"+",
"y",
")",
",",
"(",
"rect",
"[",
"3",
"]",
"[",
"0",
"]",
"+",
"x",
"+",
"width",
",",
"rect",
"[",
"3",
"]",
"[",
"1",
"]",
"+",
"y",
")",
")"
] | Given four points of a rectangle, translate the
rectangle to the specified x and y coordinates and,
optionally, change the width.
:type rect: list of tuples
:param rect: Four points describing a rectangle.
:type x: float
:param x: The amount to shift the rectangle along the x-axis.
:type y: float
:param y: The amount to shift the rectangle along the y-axis.
:type width: float
:param width: The amount by which to change the width of the
rectangle. | [
"Given",
"four",
"points",
"of",
"a",
"rectangle",
"translate",
"the",
"rectangle",
"to",
"the",
"specified",
"x",
"and",
"y",
"coordinates",
"and",
"optionally",
"change",
"the",
"width",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/core_overlap_plot.py#L57-L74 | train |
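A worked example for translate: shift a unit square by (2.5, 1.0) and widen it by 0.5.

unit = ((0, 0), (0, 1), (1, 1), (1, 0))  # a unit square
print(translate(unit, x=2.5, y=1.0, width=0.5))
# ((2.5, 1.0), (2.5, 2.0), (4.0, 2.0), (4.0, 1.0))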
christophertbrown/bioscripts | ctbBio/rax.py | remove_bad | def remove_bad(string):
"""
remove problem characters from string
"""
remove = [':', ',', '(', ')', ' ', '|', ';', '\'']
for c in remove:
string = string.replace(c, '_')
return string | python | def remove_bad(string):
"""
remove problem characters from string
"""
remove = [':', ',', '(', ')', ' ', '|', ';', '\'']
for c in remove:
string = string.replace(c, '_')
return string | [
"def",
"remove_bad",
"(",
"string",
")",
":",
"remove",
"=",
"[",
"':'",
",",
"','",
",",
"'('",
",",
"')'",
",",
"' '",
",",
"'|'",
",",
"';'",
",",
"'\\''",
"]",
"for",
"c",
"in",
"remove",
":",
"string",
"=",
"string",
".",
"replace",
"(",
"c",
",",
"'_'",
")",
"return",
"string"
] | remove problem characters from string | [
"remove",
"problem",
"characters",
"from",
"string"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L43-L50 | train |
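remove_bad in one line: every character that would break a Newick tree label is replaced with an underscore.

print(remove_bad("Escherichia coli (K-12), strain|MG1655"))
# Escherichia_coli__K-12___strain_MG1655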
christophertbrown/bioscripts | ctbBio/rax.py | get_ids | def get_ids(a):
"""
make copy of sequences with short identifier
"""
a_id = '%s.id.fa' % (a.rsplit('.', 1)[0])
a_id_lookup = '%s.id.lookup' % (a.rsplit('.', 1)[0])
if check(a_id) is True:
return a_id, a_id_lookup
a_id_f = open(a_id, 'w')
a_id_lookup_f = open(a_id_lookup, 'w')
ids = []
for seq in parse_fasta(open(a)):
id = id_generator()
while id in ids:
id = id_generator()
ids.append(id)
header = seq[0].split('>')[1]
name = remove_bad(header)
seq[0] = '>%s %s' % (id, header)
print('\n'.join(seq), file=a_id_f)
print('%s\t%s\t%s' % (id, name, header), file=a_id_lookup_f)
return a_id, a_id_lookup | python | def get_ids(a):
"""
make copy of sequences with short identifier
"""
a_id = '%s.id.fa' % (a.rsplit('.', 1)[0])
a_id_lookup = '%s.id.lookup' % (a.rsplit('.', 1)[0])
if check(a_id) is True:
return a_id, a_id_lookup
a_id_f = open(a_id, 'w')
a_id_lookup_f = open(a_id_lookup, 'w')
ids = []
for seq in parse_fasta(open(a)):
id = id_generator()
while id in ids:
id = id_generator()
ids.append(id)
header = seq[0].split('>')[1]
name = remove_bad(header)
seq[0] = '>%s %s' % (id, header)
print('\n'.join(seq), file=a_id_f)
print('%s\t%s\t%s' % (id, name, header), file=a_id_lookup_f)
return a_id, a_id_lookup | [
"def",
"get_ids",
"(",
"a",
")",
":",
"a_id",
"=",
"'%s.id.fa'",
"%",
"(",
"a",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
")",
"a_id_lookup",
"=",
"'%s.id.lookup'",
"%",
"(",
"a",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
")",
"if",
"check",
"(",
"a_id",
")",
"is",
"True",
":",
"return",
"a_id",
",",
"a_id_lookup",
"a_id_f",
"=",
"open",
"(",
"a_id",
",",
"'w'",
")",
"a_id_lookup_f",
"=",
"open",
"(",
"a_id_lookup",
",",
"'w'",
")",
"ids",
"=",
"[",
"]",
"for",
"seq",
"in",
"parse_fasta",
"(",
"open",
"(",
"a",
")",
")",
":",
"id",
"=",
"id_generator",
"(",
")",
"while",
"id",
"in",
"ids",
":",
"id",
"=",
"id_generator",
"(",
")",
"ids",
".",
"append",
"(",
"id",
")",
"header",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
"name",
"=",
"remove_bad",
"(",
"header",
")",
"seq",
"[",
"0",
"]",
"=",
"'>%s %s'",
"%",
"(",
"id",
",",
"header",
")",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"seq",
")",
",",
"file",
"=",
"a_id_f",
")",
"print",
"(",
"'%s\\t%s\\t%s'",
"%",
"(",
"id",
",",
"name",
",",
"header",
")",
",",
"file",
"=",
"a_id_lookup_f",
")",
"return",
"a_id",
",",
"a_id_lookup"
] | make copy of sequences with short identifier | [
"make",
"copy",
"of",
"sequences",
"with",
"short",
"identifier"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L55-L76 | train |
christophertbrown/bioscripts | ctbBio/rax.py | convert2phylip | def convert2phylip(convert):
"""
convert fasta to phylip because RAxML is ridiculous
"""
out = '%s.phy' % (convert.rsplit('.', 1)[0])
if check(out) is False:
convert = open(convert, 'rU')
out_f = open(out, 'w')
alignments = AlignIO.parse(convert, "fasta")
AlignIO.write(alignments, out, "phylip")
return out | python | def convert2phylip(convert):
"""
convert fasta to phylip because RAxML is ridiculous
"""
out = '%s.phy' % (convert.rsplit('.', 1)[0])
if check(out) is False:
convert = open(convert, 'rU')
out_f = open(out, 'w')
alignments = AlignIO.parse(convert, "fasta")
AlignIO.write(alignments, out, "phylip")
return out | [
"def",
"convert2phylip",
"(",
"convert",
")",
":",
"out",
"=",
"'%s.phy'",
"%",
"(",
"convert",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
")",
"if",
"check",
"(",
"out",
")",
"is",
"False",
":",
"convert",
"=",
"open",
"(",
"convert",
",",
"'rU'",
")",
"out_f",
"=",
"open",
"(",
"out",
",",
"'w'",
")",
"alignments",
"=",
"AlignIO",
".",
"parse",
"(",
"convert",
",",
"\"fasta\"",
")",
"AlignIO",
".",
"write",
"(",
"alignments",
",",
"out",
",",
"\"phylip\"",
")",
"return",
"out"
] | convert fasta to phylip because RAxML is ridiculous | [
"convert",
"fasta",
"to",
"phylip",
"because",
"RAxML",
"is",
"ridiculous"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L78-L88 | train |
christophertbrown/bioscripts | ctbBio/rax.py | run_iqtree | def run_iqtree(phy, model, threads, cluster, node):
"""
run IQ-Tree
"""
# set ppn based on threads
if threads > 24:
ppn = 24
else:
ppn = threads
tree = '%s.treefile' % (phy)
if check(tree) is False:
if model is False:
model = 'TEST'
dir = os.getcwd()
command = 'iqtree-omp -s %s -m %s -nt %s -quiet' % \
(phy, model, threads)
if cluster is False:
p = Popen(command, shell = True)
else:
if node is False:
node = '1'
qsub = 'qsub -l nodes=%s:ppn=%s -m e -N iqtree' % (node, ppn)
command = 'cd /tmp; mkdir iqtree; cd iqtree; cp %s/%s .; %s; mv * %s/; rm -r ../iqtree' \
% (dir, phy, command, dir)
re_call = 'cd %s; %s --no-fast --iq' % (dir.rsplit('/', 1)[0], ' '.join(sys.argv))
p = Popen('echo "%s;%s" | %s' % (command, re_call, qsub), shell = True)
p.communicate()
return tree | python | def run_iqtree(phy, model, threads, cluster, node):
"""
run IQ-Tree
"""
# set ppn based on threads
if threads > 24:
ppn = 24
else:
ppn = threads
tree = '%s.treefile' % (phy)
if check(tree) is False:
if model is False:
model = 'TEST'
dir = os.getcwd()
command = 'iqtree-omp -s %s -m %s -nt %s -quiet' % \
(phy, model, threads)
if cluster is False:
p = Popen(command, shell = True)
else:
if node is False:
node = '1'
qsub = 'qsub -l nodes=%s:ppn=%s -m e -N iqtree' % (node, ppn)
command = 'cd /tmp; mkdir iqtree; cd iqtree; cp %s/%s .; %s; mv * %s/; rm -r ../iqtree' \
% (dir, phy, command, dir)
re_call = 'cd %s; %s --no-fast --iq' % (dir.rsplit('/', 1)[0], ' '.join(sys.argv))
p = Popen('echo "%s;%s" | %s' % (command, re_call, qsub), shell = True)
p.communicate()
return tree | [
"def",
"run_iqtree",
"(",
"phy",
",",
"model",
",",
"threads",
",",
"cluster",
",",
"node",
")",
":",
"# set ppn based on threads",
"if",
"threads",
">",
"24",
":",
"ppn",
"=",
"24",
"else",
":",
"ppn",
"=",
"threads",
"tree",
"=",
"'%s.treefile'",
"%",
"(",
"phy",
")",
"if",
"check",
"(",
"tree",
")",
"is",
"False",
":",
"if",
"model",
"is",
"False",
":",
"model",
"=",
"'TEST'",
"dir",
"=",
"os",
".",
"getcwd",
"(",
")",
"command",
"=",
"'iqtree-omp -s %s -m %s -nt %s -quiet'",
"%",
"(",
"phy",
",",
"model",
",",
"threads",
")",
"if",
"cluster",
"is",
"False",
":",
"p",
"=",
"Popen",
"(",
"command",
",",
"shell",
"=",
"True",
")",
"else",
":",
"if",
"node",
"is",
"False",
":",
"node",
"=",
"'1'",
"qsub",
"=",
"'qsub -l nodes=%s:ppn=%s -m e -N iqtree'",
"%",
"(",
"node",
",",
"ppn",
")",
"command",
"=",
"'cd /tmp; mkdir iqtree; cd iqtree; cp %s/%s .; %s; mv * %s/; rm -r ../iqtree'",
"%",
"(",
"dir",
",",
"phy",
",",
"command",
",",
"dir",
")",
"re_call",
"=",
"'cd %s; %s --no-fast --iq'",
"%",
"(",
"dir",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"0",
"]",
",",
"' '",
".",
"join",
"(",
"sys",
".",
"argv",
")",
")",
"p",
"=",
"Popen",
"(",
"'echo \"%s;%s\" | %s'",
"%",
"(",
"command",
",",
"re_call",
",",
"qsub",
")",
",",
"shell",
"=",
"True",
")",
"p",
".",
"communicate",
"(",
")",
"return",
"tree"
] | run IQ-Tree | [
"run",
"IQ",
"-",
"Tree"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L163-L190 | train |
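A sketch of a local (non-cluster) IQ-TREE run; the alignment name is hypothetical, iqtree-omp must be on PATH, and model=False falls back to the TEST model search:

tree = run_iqtree('aln.phy', model=False, threads=4,
                  cluster=False, node=False)
print('tree written to', tree)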
christophertbrown/bioscripts | ctbBio/rax.py | fix_tree | def fix_tree(tree, a_id_lookup, out):
"""
get the names for sequences in the raxml tree
"""
if check(out) is False and check(tree) is True:
tree = open(tree).read()
for line in open(a_id_lookup):
id, name, header = line.strip().split('\t')
tree = tree.replace(id+':', name+':')
out_f = open(out, 'w')
print(tree.strip(), file=out_f)
return out | python | def fix_tree(tree, a_id_lookup, out):
"""
get the names for sequences in the raxml tree
"""
if check(out) is False and check(tree) is True:
tree = open(tree).read()
for line in open(a_id_lookup):
id, name, header = line.strip().split('\t')
tree = tree.replace(id+':', name+':')
out_f = open(out, 'w')
print(tree.strip(), file=out_f)
return out | [
"def",
"fix_tree",
"(",
"tree",
",",
"a_id_lookup",
",",
"out",
")",
":",
"if",
"check",
"(",
"out",
")",
"is",
"False",
"and",
"check",
"(",
"tree",
")",
"is",
"True",
":",
"tree",
"=",
"open",
"(",
"tree",
")",
".",
"read",
"(",
")",
"for",
"line",
"in",
"open",
"(",
"a_id_lookup",
")",
":",
"id",
",",
"name",
",",
"header",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"tree",
"=",
"tree",
".",
"replace",
"(",
"id",
"+",
"':'",
",",
"name",
"+",
"':'",
")",
"out_f",
"=",
"open",
"(",
"out",
",",
"'w'",
")",
"print",
"(",
"tree",
".",
"strip",
"(",
")",
",",
"file",
"=",
"out_f",
")",
"return",
"out"
] | get the names for sequences in the raxml tree | [
"get",
"the",
"names",
"for",
"sequences",
"in",
"the",
"raxml",
"tree"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rax.py#L192-L203 | train |
disqus/nydus | nydus/db/__init__.py | create_cluster | def create_cluster(settings):
"""
Creates a new Nydus cluster from the given settings.
:param settings: Dictionary of the cluster settings.
:returns: Configured instance of ``nydus.db.base.Cluster``.
>>> redis = create_cluster({
>>> 'backend': 'nydus.db.backends.redis.Redis',
>>> 'router': 'nydus.db.routers.redis.PartitionRouter',
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> },
>>> 'hosts': {
>>> 0: {'db': 0},
>>> 1: {'db': 1},
>>> 2: {'db': 2},
>>> }
>>> })
"""
# Pull in our client
settings = copy.deepcopy(settings)
backend = settings.pop('engine', settings.pop('backend', None))
if isinstance(backend, basestring):
Conn = import_string(backend)
elif backend:
Conn = backend
else:
raise KeyError('backend')
# Pull in our cluster
cluster = settings.pop('cluster', None)
if not cluster:
Cluster = Conn.get_cluster()
elif isinstance(cluster, basestring):
Cluster = import_string(cluster)
else:
Cluster = cluster
# Pull in our router
router = settings.pop('router', None)
if not router:
Router = BaseRouter
elif isinstance(router, basestring):
Router = import_string(router)
else:
Router = router
# Build the connection cluster
return Cluster(
router=Router,
backend=Conn,
**settings
) | python | def create_cluster(settings):
"""
Creates a new Nydus cluster from the given settings.
:param settings: Dictionary of the cluster settings.
:returns: Configured instance of ``nydus.db.base.Cluster``.
>>> redis = create_cluster({
>>> 'backend': 'nydus.db.backends.redis.Redis',
>>> 'router': 'nydus.db.routers.redis.PartitionRouter',
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> },
>>> 'hosts': {
>>> 0: {'db': 0},
>>> 1: {'db': 1},
>>> 2: {'db': 2},
>>> }
>>> })
"""
# Pull in our client
settings = copy.deepcopy(settings)
backend = settings.pop('engine', settings.pop('backend', None))
if isinstance(backend, basestring):
Conn = import_string(backend)
elif backend:
Conn = backend
else:
raise KeyError('backend')
# Pull in our cluster
cluster = settings.pop('cluster', None)
if not cluster:
Cluster = Conn.get_cluster()
elif isinstance(cluster, basestring):
Cluster = import_string(cluster)
else:
Cluster = cluster
# Pull in our router
router = settings.pop('router', None)
if not router:
Router = BaseRouter
elif isinstance(router, basestring):
Router = import_string(router)
else:
Router = router
# Build the connection cluster
return Cluster(
router=Router,
backend=Conn,
**settings
) | [
"def",
"create_cluster",
"(",
"settings",
")",
":",
"# Pull in our client",
"settings",
"=",
"copy",
".",
"deepcopy",
"(",
"settings",
")",
"backend",
"=",
"settings",
".",
"pop",
"(",
"'engine'",
",",
"settings",
".",
"pop",
"(",
"'backend'",
",",
"None",
")",
")",
"if",
"isinstance",
"(",
"backend",
",",
"basestring",
")",
":",
"Conn",
"=",
"import_string",
"(",
"backend",
")",
"elif",
"backend",
":",
"Conn",
"=",
"backend",
"else",
":",
"raise",
"KeyError",
"(",
"'backend'",
")",
"# Pull in our cluster",
"cluster",
"=",
"settings",
".",
"pop",
"(",
"'cluster'",
",",
"None",
")",
"if",
"not",
"cluster",
":",
"Cluster",
"=",
"Conn",
".",
"get_cluster",
"(",
")",
"elif",
"isinstance",
"(",
"cluster",
",",
"basestring",
")",
":",
"Cluster",
"=",
"import_string",
"(",
"cluster",
")",
"else",
":",
"Cluster",
"=",
"cluster",
"# Pull in our router",
"router",
"=",
"settings",
".",
"pop",
"(",
"'router'",
",",
"None",
")",
"if",
"not",
"router",
":",
"Router",
"=",
"BaseRouter",
"elif",
"isinstance",
"(",
"router",
",",
"basestring",
")",
":",
"Router",
"=",
"import_string",
"(",
"router",
")",
"else",
":",
"Router",
"=",
"router",
"# Build the connection cluster",
"return",
"Cluster",
"(",
"router",
"=",
"Router",
",",
"backend",
"=",
"Conn",
",",
"*",
"*",
"settings",
")"
] | Creates a new Nydus cluster from the given settings.
:param settings: Dictionary of the cluster settings.
:returns: Configured instance of ``nydus.db.base.Cluster``.
>>> redis = create_cluster({
>>> 'backend': 'nydus.db.backends.redis.Redis',
>>> 'router': 'nydus.db.routers.redis.PartitionRouter',
>>> 'defaults': {
>>> 'host': 'localhost',
>>> 'port': 6379,
>>> },
>>> 'hosts': {
>>> 0: {'db': 0},
>>> 1: {'db': 1},
>>> 2: {'db': 2},
>>> }
>>> }) | [
"Creates",
"a",
"new",
"Nydus",
"cluster",
"from",
"the",
"given",
"settings",
"."
] | 9b505840da47a34f758a830c3992fa5dcb7bb7ad | https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/__init__.py#L28-L82 | train |
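The settings dict above accepts either a dotted-path string or a class object for 'backend', 'cluster', and 'router'. As a hedged aside (import_string itself is not shown in this record), the string form is typically resolved like this minimal, self-contained sketch:

from importlib import import_module

def import_string_sketch(dotted_path):
    # Split 'package.module.Name' into a module path and an attribute name.
    module_path, _, attr = dotted_path.rpartition('.')
    return getattr(import_module(module_path), attr)

# e.g. import_string_sketch('collections.OrderedDict') -> the OrderedDict class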
dokterbob/django-multilingual-model | multilingual_model/models.py | MultilingualModel._get_translation | def _get_translation(self, field, code):
"""
Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain.
"""
if not code in self._translation_cache:
translations = self.translations.select_related()
logger.debug(
u'Matched with field %s for language %s. Attempting lookup.',
field, code
)
try:
translation_obj = translations.get(language_code=code)
except ObjectDoesNotExist:
translation_obj = None
self._translation_cache[code] = translation_obj
logger.debug(u'Translation not found in cache.')
else:
logger.debug(u'Translation found in cache.')
# Get the translation from the cache
translation_obj = self._translation_cache.get(code)
# If this is none, it means that a translation does not exist
# It is important to cache this one as well
if not translation_obj:
raise ObjectDoesNotExist
field_value = getattr(translation_obj, field)
logger.debug(
u'Found translation object %s, returning value %s.',
translation_obj, field_value
)
return field_value | python | def _get_translation(self, field, code):
"""
Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain.
"""
if not code in self._translation_cache:
translations = self.translations.select_related()
logger.debug(
u'Matched with field %s for language %s. Attempting lookup.',
field, code
)
try:
translation_obj = translations.get(language_code=code)
except ObjectDoesNotExist:
translation_obj = None
self._translation_cache[code] = translation_obj
logger.debug(u'Translation not found in cache.')
else:
logger.debug(u'Translation found in cache.')
# Get the translation from the cache
translation_obj = self._translation_cache.get(code)
# If this is none, it means that a translation does not exist
# It is important to cache this one as well
if not translation_obj:
raise ObjectDoesNotExist
field_value = getattr(translation_obj, field)
logger.debug(
u'Found translation object %s, returning value %s.',
translation_obj, field_value
)
return field_value | [
"def",
"_get_translation",
"(",
"self",
",",
"field",
",",
"code",
")",
":",
"if",
"not",
"code",
"in",
"self",
".",
"_translation_cache",
":",
"translations",
"=",
"self",
".",
"translations",
".",
"select_related",
"(",
")",
"logger",
".",
"debug",
"(",
"u'Matched with field %s for language %s. Attempting lookup.'",
",",
"field",
",",
"code",
")",
"try",
":",
"translation_obj",
"=",
"translations",
".",
"get",
"(",
"language_code",
"=",
"code",
")",
"except",
"ObjectDoesNotExist",
":",
"translation_obj",
"=",
"None",
"self",
".",
"_translation_cache",
"[",
"code",
"]",
"=",
"translation_obj",
"logger",
".",
"debug",
"(",
"u'Translation not found in cache.'",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"u'Translation found in cache.'",
")",
"# Get the translation from the cache",
"translation_obj",
"=",
"self",
".",
"_translation_cache",
".",
"get",
"(",
"code",
")",
"# If this is none, it means that a translation does not exist",
"# It is important to cache this one as well",
"if",
"not",
"translation_obj",
":",
"raise",
"ObjectDoesNotExist",
"field_value",
"=",
"getattr",
"(",
"translation_obj",
",",
"field",
")",
"logger",
".",
"debug",
"(",
"u'Found translation object %s, returning value %s.'",
",",
"translation_obj",
",",
"field_value",
")",
"return",
"field_value"
] | Gets the translation of a specific field for a specific language code.
This raises ObjectDoesNotExist if the lookup was unsuccessful. As of
today, this stuff is cached. As the cache is rather aggressive it
might cause rather strange effects. However, we would see the same
effects when an ordinary object is changed which is already in memory:
the old state would remain. | [
"Gets",
"the",
"translation",
"of",
"a",
"specific",
"field",
"for",
"a",
"specific",
"language",
"code",
"."
] | 2479b2c3d6f7b697e95aa1e082c8bc8699f1f638 | https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/models.py#L44-L90 | train |
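A standalone sketch (not part of this package) of the caching pattern used above; note that misses are cached as None, so a repeated lookup for the same language code never re-queries:

class CachedLookup:
    def __init__(self, data):
        self._data = data        # stands in for the translations queryset
        self._cache = {}

    def get(self, code):
        if code not in self._cache:
            # Cache the result even when it is None (a negative hit).
            self._cache[code] = self._data.get(code)
        value = self._cache[code]
        if value is None:
            raise KeyError(code)  # mirrors raising ObjectDoesNotExist
        return value

lookup = CachedLookup({'en': 'Hello'})
print(lookup.get('en'))  # -> 'Hello'; lookup.get('fr') raises KeyError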
dokterbob/django-multilingual-model | multilingual_model/models.py | MultilingualModel.unicode_wrapper | def unicode_wrapper(self, property, default=ugettext('Untitled')):
"""
Wrapper to allow for easy unicode representation of an object by
the specified property. If this wrapper is not able to find the
right translation of the specified property, it will return the
default value instead.
Example::
def __unicode__(self):
return unicode_wrapper('name', default='Unnamed')
"""
# TODO: Test coverage!
try:
value = getattr(self, property)
except ValueError:
logger.warn(
u'ValueError rendering unicode for %s object.',
self._meta.object_name
)
value = None
if not value:
value = default
return value | python | def unicode_wrapper(self, property, default=ugettext('Untitled')):
"""
Wrapper to allow for easy unicode representation of an object by
the specified property. If this wrapper is not able to find the
right translation of the specified property, it will return the
default value instead.
Example::
def __unicode__(self):
return unicode_wrapper('name', default='Unnamed')
"""
# TODO: Test coverage!
try:
value = getattr(self, property)
except ValueError:
logger.warn(
u'ValueError rendering unicode for %s object.',
self._meta.object_name
)
value = None
if not value:
value = default
return value | [
"def",
"unicode_wrapper",
"(",
"self",
",",
"property",
",",
"default",
"=",
"ugettext",
"(",
"'Untitled'",
")",
")",
":",
"# TODO: Test coverage!",
"try",
":",
"value",
"=",
"getattr",
"(",
"self",
",",
"property",
")",
"except",
"ValueError",
":",
"logger",
".",
"warn",
"(",
"u'ValueError rendering unicode for %s object.'",
",",
"self",
".",
"_meta",
".",
"object_name",
")",
"value",
"=",
"None",
"if",
"not",
"value",
":",
"value",
"=",
"default",
"return",
"value"
] | Wrapper to allow for easy unicode representation of an object by
the specified property. If this wrapper is not able to find the
right translation of the specified property, it will return the
default value instead.
Example::
def __unicode__(self):
return unicode_wrapper('name', default='Unnamed') | [
"Wrapper",
"to",
"allow",
"for",
"easy",
"unicode",
"representation",
"of",
"an",
"object",
"by",
"the",
"specified",
"property",
".",
"If",
"this",
"wrapper",
"is",
"not",
"able",
"to",
"find",
"the",
"right",
"translation",
"of",
"the",
"specified",
"property",
"it",
"will",
"return",
"the",
"default",
"value",
"instead",
"."
] | 2479b2c3d6f7b697e95aa1e082c8bc8699f1f638 | https://github.com/dokterbob/django-multilingual-model/blob/2479b2c3d6f7b697e95aa1e082c8bc8699f1f638/multilingual_model/models.py#L202-L228 | train |
christophertbrown/bioscripts | ctbBio/strip_align_inserts.py | strip_inserts | def strip_inserts(fasta):
"""
remove insertion columns from aligned fasta file
"""
for seq in parse_fasta(fasta):
seq[1] = ''.join([b for b in seq[1] if b == '-' or b.isupper()])
yield seq | python | def strip_inserts(fasta):
"""
remove insertion columns from aligned fasta file
"""
for seq in parse_fasta(fasta):
seq[1] = ''.join([b for b in seq[1] if b == '-' or b.isupper()])
yield seq | [
"def",
"strip_inserts",
"(",
"fasta",
")",
":",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fasta",
")",
":",
"seq",
"[",
"1",
"]",
"=",
"''",
".",
"join",
"(",
"[",
"b",
"for",
"b",
"in",
"seq",
"[",
"1",
"]",
"if",
"b",
"==",
"'-'",
"or",
"b",
".",
"isupper",
"(",
")",
"]",
")",
"yield",
"seq"
] | remove insertion columns from aligned fasta file | [
"remove",
"insertion",
"columns",
"from",
"aligned",
"fasta",
"file"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_align_inserts.py#L12-L18 | train |
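A quick illustration of the filter above, assuming the usual alignment convention that insert columns are lowercase or '.' while match columns are uppercase or '-':

aligned = 'AC-gu.TA'
stripped = ''.join(b for b in aligned if b == '-' or b.isupper())
print(stripped)  # -> 'AC-TA'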
cldf/segments | src/segments/tokenizer.py | Tokenizer.transform | def transform(self, word, column=Profile.GRAPHEME_COL, error=errors.replace):
"""
Transform a string's graphemes into the mappings given in a different column
in the orthography profile.
Parameters
----------
word : str
The input string to be tokenized.
column : str (default = "Grapheme")
The label of the column to transform to. Defaults to tokenizing with
the orthography profile.
Returns
-------
result : list of lists
Result of the transformation.
"""
assert self.op, 'method can only be called with orthography profile.'
if column != Profile.GRAPHEME_COL and column not in self.op.column_labels:
raise ValueError("Column {0} not found in profile.".format(column))
word = self.op.tree.parse(word, error)
if column == Profile.GRAPHEME_COL:
return word
out = []
for token in word:
try:
target = self.op.graphemes[token][column]
except KeyError:
target = self._errors['replace'](token)
if target is not None:
if isinstance(target, (tuple, list)):
out.extend(target)
else:
out.append(target)
return out | python | def transform(self, word, column=Profile.GRAPHEME_COL, error=errors.replace):
"""
Transform a string's graphemes into the mappings given in a different column
in the orthography profile.
Parameters
----------
word : str
The input string to be tokenized.
column : str (default = "Grapheme")
The label of the column to transform to. Defaults to tokenizing with
the orthography profile.
Returns
-------
result : list of lists
Result of the transformation.
"""
assert self.op, 'method can only be called with orthography profile.'
if column != Profile.GRAPHEME_COL and column not in self.op.column_labels:
raise ValueError("Column {0} not found in profile.".format(column))
word = self.op.tree.parse(word, error)
if column == Profile.GRAPHEME_COL:
return word
out = []
for token in word:
try:
target = self.op.graphemes[token][column]
except KeyError:
target = self._errors['replace'](token)
if target is not None:
if isinstance(target, (tuple, list)):
out.extend(target)
else:
out.append(target)
return out | [
"def",
"transform",
"(",
"self",
",",
"word",
",",
"column",
"=",
"Profile",
".",
"GRAPHEME_COL",
",",
"error",
"=",
"errors",
".",
"replace",
")",
":",
"assert",
"self",
".",
"op",
",",
"'method can only be called with orthography profile.'",
"if",
"column",
"!=",
"Profile",
".",
"GRAPHEME_COL",
"and",
"column",
"not",
"in",
"self",
".",
"op",
".",
"column_labels",
":",
"raise",
"ValueError",
"(",
"\"Column {0} not found in profile.\"",
".",
"format",
"(",
"column",
")",
")",
"word",
"=",
"self",
".",
"op",
".",
"tree",
".",
"parse",
"(",
"word",
",",
"error",
")",
"if",
"column",
"==",
"Profile",
".",
"GRAPHEME_COL",
":",
"return",
"word",
"out",
"=",
"[",
"]",
"for",
"token",
"in",
"word",
":",
"try",
":",
"target",
"=",
"self",
".",
"op",
".",
"graphemes",
"[",
"token",
"]",
"[",
"column",
"]",
"except",
"KeyError",
":",
"target",
"=",
"self",
".",
"_errors",
"[",
"'replace'",
"]",
"(",
"token",
")",
"if",
"target",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"target",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"out",
".",
"extend",
"(",
"target",
")",
"else",
":",
"out",
".",
"append",
"(",
"target",
")",
"return",
"out"
] | Transform a string's graphemes into the mappings given in a different column
in the orthography profile.
Parameters
----------
word : str
The input string to be tokenized.
column : str (default = "Grapheme")
The label of the column to transform to. Default it to tokenize with
orthography profile.
Returns
-------
result : list of lists
Result of the transformation. | [
"Transform",
"a",
"string",
"s",
"graphemes",
"into",
"the",
"mappings",
"given",
"in",
"a",
"different",
"column",
"in",
"the",
"orthography",
"profile",
"."
] | 9136a4ec89555bf9b574399ffbb07f3cc9a9f45f | https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/tokenizer.py#L231-L270 | train |
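A hypothetical usage sketch of transform; the two-row profile and its 'IPA' column are invented for illustration, not taken from the segments test data:

from segments import Profile, Tokenizer

prof = Profile({'Grapheme': 'ch', 'IPA': 'tʃ'},
               {'Grapheme': 'a', 'IPA': 'a'})
t = Tokenizer(profile=prof)
print(t.transform('cha', column='IPA'))  # expected: ['tʃ', 'a']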
cldf/segments | src/segments/tokenizer.py | Tokenizer.rules | def rules(self, word):
"""
Function to tokenize input string and return output of str with ortho rules
applied.
Parameters
----------
word : str
The input string to be tokenized.
Returns
-------
result : str
Result of the orthography rules applied to the input str.
"""
return self._rules.apply(word) if self._rules else word | python | def rules(self, word):
"""
Function to tokenize input string and return output of str with ortho rules
applied.
Parameters
----------
word : str
The input string to be tokenized.
Returns
-------
result : str
Result of the orthography rules applied to the input str.
"""
return self._rules.apply(word) if self._rules else word | [
"def",
"rules",
"(",
"self",
",",
"word",
")",
":",
"return",
"self",
".",
"_rules",
".",
"apply",
"(",
"word",
")",
"if",
"self",
".",
"_rules",
"else",
"word"
] | Function to tokenize input string and return output of str with ortho rules
applied.
Parameters
----------
word : str
The input string to be tokenized.
Returns
-------
result : str
Result of the orthography rules applied to the input str. | [
"Function",
"to",
"tokenize",
"input",
"string",
"and",
"return",
"output",
"of",
"str",
"with",
"ortho",
"rules",
"applied",
"."
] | 9136a4ec89555bf9b574399ffbb07f3cc9a9f45f | https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/tokenizer.py#L272-L288 | train |
cldf/segments | src/segments/tokenizer.py | Tokenizer.combine_modifiers | def combine_modifiers(self, graphemes):
"""
Given a string that is space-delimited on Unicode grapheme clusters,
group Unicode modifier letters with their preceding base characters,
deal with tie bars, etc.
Parameters
----------
graphemes : list
Unicode grapheme clusters (from a tokenized string) to be combined into
simple IPA segments.
"""
result = []
temp = ""
count = len(graphemes)
for grapheme in reversed(graphemes):
count -= 1
if len(grapheme) == 1 and unicodedata.category(grapheme) == "Lm" \
and not ord(grapheme) in [712, 716]:
temp = grapheme + temp
# hack for the cases where a space modifier is the first character in the
# string
if count == 0:
result[-1] = temp + result[-1]
continue # pragma: no cover
# catch and repair stress marks
if len(grapheme) == 1 and ord(grapheme) in [712, 716]:
result[-1] = grapheme + result[-1]
temp = ""
continue
# combine contour tone marks (non-accents)
if len(grapheme) == 1 and unicodedata.category(grapheme) == "Sk":
if len(result) == 0:
result.append(grapheme)
temp = ""
continue
else:
if unicodedata.category(result[-1][0]) == "Sk":
result[-1] = grapheme + result[-1]
temp = ""
continue
result.append(grapheme + temp)
temp = ""
# last check for tie bars
segments = result[::-1]
i = 0
r = []
while i < len(segments):
# tie bars
if ord(segments[i][-1]) in [865, 860]:
r.append(segments[i] + segments[i + 1])
i += 2
else:
r.append(segments[i])
i += 1
return r | python | def combine_modifiers(self, graphemes):
"""
Given a string that is space-delimited on Unicode grapheme clusters,
group Unicode modifier letters with their preceding base characters,
deal with tie bars, etc.
Parameters
----------
graphemes : list
Unicode grapheme clusters (from a tokenized string) to be combined into
simple IPA segments.
"""
result = []
temp = ""
count = len(graphemes)
for grapheme in reversed(graphemes):
count -= 1
if len(grapheme) == 1 and unicodedata.category(grapheme) == "Lm" \
and not ord(grapheme) in [712, 716]:
temp = grapheme + temp
# hack for the cases where a space modifier is the first character in the
# string
if count == 0:
result[-1] = temp + result[-1]
continue # pragma: no cover
# catch and repair stress marks
if len(grapheme) == 1 and ord(grapheme) in [712, 716]:
result[-1] = grapheme + result[-1]
temp = ""
continue
# combine contour tone marks (non-accents)
if len(grapheme) == 1 and unicodedata.category(grapheme) == "Sk":
if len(result) == 0:
result.append(grapheme)
temp = ""
continue
else:
if unicodedata.category(result[-1][0]) == "Sk":
result[-1] = grapheme + result[-1]
temp = ""
continue
result.append(grapheme + temp)
temp = ""
# last check for tie bars
segments = result[::-1]
i = 0
r = []
while i < len(segments):
# tie bars
if ord(segments[i][-1]) in [865, 860]:
r.append(segments[i] + segments[i + 1])
i += 2
else:
r.append(segments[i])
i += 1
return r | [
"def",
"combine_modifiers",
"(",
"self",
",",
"graphemes",
")",
":",
"result",
"=",
"[",
"]",
"temp",
"=",
"\"\"",
"count",
"=",
"len",
"(",
"graphemes",
")",
"for",
"grapheme",
"in",
"reversed",
"(",
"graphemes",
")",
":",
"count",
"-=",
"1",
"if",
"len",
"(",
"grapheme",
")",
"==",
"1",
"and",
"unicodedata",
".",
"category",
"(",
"grapheme",
")",
"==",
"\"Lm\"",
"and",
"not",
"ord",
"(",
"grapheme",
")",
"in",
"[",
"712",
",",
"716",
"]",
":",
"temp",
"=",
"grapheme",
"+",
"temp",
"# hack for the cases where a space modifier is the first character in the",
"# string",
"if",
"count",
"==",
"0",
":",
"result",
"[",
"-",
"1",
"]",
"=",
"temp",
"+",
"result",
"[",
"-",
"1",
"]",
"continue",
"# pragma: no cover",
"# catch and repair stress marks",
"if",
"len",
"(",
"grapheme",
")",
"==",
"1",
"and",
"ord",
"(",
"grapheme",
")",
"in",
"[",
"712",
",",
"716",
"]",
":",
"result",
"[",
"-",
"1",
"]",
"=",
"grapheme",
"+",
"result",
"[",
"-",
"1",
"]",
"temp",
"=",
"\"\"",
"continue",
"# combine contour tone marks (non-accents)",
"if",
"len",
"(",
"grapheme",
")",
"==",
"1",
"and",
"unicodedata",
".",
"category",
"(",
"grapheme",
")",
"==",
"\"Sk\"",
":",
"if",
"len",
"(",
"result",
")",
"==",
"0",
":",
"result",
".",
"append",
"(",
"grapheme",
")",
"temp",
"=",
"\"\"",
"continue",
"else",
":",
"if",
"unicodedata",
".",
"category",
"(",
"result",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
")",
"==",
"\"Sk\"",
":",
"result",
"[",
"-",
"1",
"]",
"=",
"grapheme",
"+",
"result",
"[",
"-",
"1",
"]",
"temp",
"=",
"\"\"",
"continue",
"result",
".",
"append",
"(",
"grapheme",
"+",
"temp",
")",
"temp",
"=",
"\"\"",
"# last check for tie bars",
"segments",
"=",
"result",
"[",
":",
":",
"-",
"1",
"]",
"i",
"=",
"0",
"r",
"=",
"[",
"]",
"while",
"i",
"<",
"len",
"(",
"segments",
")",
":",
"# tie bars",
"if",
"ord",
"(",
"segments",
"[",
"i",
"]",
"[",
"-",
"1",
"]",
")",
"in",
"[",
"865",
",",
"860",
"]",
":",
"r",
".",
"append",
"(",
"segments",
"[",
"i",
"]",
"+",
"segments",
"[",
"i",
"+",
"1",
"]",
")",
"i",
"+=",
"2",
"else",
":",
"r",
".",
"append",
"(",
"segments",
"[",
"i",
"]",
")",
"i",
"+=",
"1",
"return",
"r"
] | Given a string that is space-delimited on Unicode grapheme clusters,
group Unicode modifier letters with their preceding base characters,
deal with tie bars, etc.
Parameters
----------
graphemes : list
Unicode grapheme clusters (from a tokenized string) to be combined into
simple IPA segments. | [
"Given",
"a",
"string",
"that",
"is",
"space",
"-",
"delimited",
"on",
"Unicode",
"grapheme",
"clusters",
"group",
"Unicode",
"modifier",
"letters",
"with",
"their",
"preceding",
"base",
"characters",
"deal",
"with",
"tie",
"bars",
"etc",
"."
] | 9136a4ec89555bf9b574399ffbb07f3cc9a9f45f | https://github.com/cldf/segments/blob/9136a4ec89555bf9b574399ffbb07f3cc9a9f45f/src/segments/tokenizer.py#L290-L349 | train |
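A small demo of the Unicode properties the method keys on (the code points are standard; the grouping comment restates what the loop above does):

import unicodedata

for ch in ['k', '\u02b0', '\u02c8']:  # 'k', modifier small h, primary stress
    print(hex(ord(ch)), unicodedata.category(ch))
# 0x6b Ll / 0x2b0 Lm / 0x2c8 Lm -- 'Lm' marks modifier letters; 0x2c8 (712)
# is special-cased as a stress mark, so ['k', '\u02b0'] merges to 'k\u02b0'
# while the stress mark attaches to the segment that follows it.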
christophertbrown/bioscripts | ctbBio/rRNA_insertions_gff.py | parse_catalytic | def parse_catalytic(insertion, gff):
"""
parse catalytic RNAs to gff format
"""
offset = insertion['offset']
GeneStrand = insertion['strand']
if type(insertion['intron']) is not str:
return gff
for intron in parse_fasta(insertion['intron'].split('|')):
ID, annot, strand, pos = intron[0].split('>')[1].split()
Start, End = [int(i) for i in pos.split('-')]
if strand != GeneStrand:
if strand == '+':
strand = '-'
else:
strand = '+'
Start, End = End - 2, Start - 2
Start, End = abs(Start + offset) - 1, abs(End + offset) - 1
gff['#seqname'].append(insertion['ID'])
gff['source'].append('Rfam')
gff['feature'].append('Catalytic RNA')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('ID=%s; Name=%s' % (ID, annot))
return gff | python | def parse_catalytic(insertion, gff):
"""
parse catalytic RNAs to gff format
"""
offset = insertion['offset']
GeneStrand = insertion['strand']
if type(insertion['intron']) is not str:
return gff
for intron in parse_fasta(insertion['intron'].split('|')):
ID, annot, strand, pos = intron[0].split('>')[1].split()
Start, End = [int(i) for i in pos.split('-')]
if strand != GeneStrand:
if strand == '+':
strand = '-'
else:
strand = '+'
Start, End = End - 2, Start - 2
Start, End = abs(Start + offset) - 1, abs(End + offset) - 1
gff['#seqname'].append(insertion['ID'])
gff['source'].append('Rfam')
gff['feature'].append('Catalytic RNA')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('ID=%s; Name=%s' % (ID, annot))
return gff | [
"def",
"parse_catalytic",
"(",
"insertion",
",",
"gff",
")",
":",
"offset",
"=",
"insertion",
"[",
"'offset'",
"]",
"GeneStrand",
"=",
"insertion",
"[",
"'strand'",
"]",
"if",
"type",
"(",
"insertion",
"[",
"'intron'",
"]",
")",
"is",
"not",
"str",
":",
"return",
"gff",
"for",
"intron",
"in",
"parse_fasta",
"(",
"insertion",
"[",
"'intron'",
"]",
".",
"split",
"(",
"'|'",
")",
")",
":",
"ID",
",",
"annot",
",",
"strand",
",",
"pos",
"=",
"intron",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"Start",
",",
"End",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"pos",
".",
"split",
"(",
"'-'",
")",
"]",
"if",
"strand",
"!=",
"GeneStrand",
":",
"if",
"strand",
"==",
"'+'",
":",
"strand",
"=",
"'-'",
"else",
":",
"strand",
"=",
"'+'",
"Start",
",",
"End",
"=",
"End",
"-",
"2",
",",
"Start",
"-",
"2",
"Start",
",",
"End",
"=",
"abs",
"(",
"Start",
"+",
"offset",
")",
"-",
"1",
",",
"abs",
"(",
"End",
"+",
"offset",
")",
"-",
"1",
"gff",
"[",
"'#seqname'",
"]",
".",
"append",
"(",
"insertion",
"[",
"'ID'",
"]",
")",
"gff",
"[",
"'source'",
"]",
".",
"append",
"(",
"'Rfam'",
")",
"gff",
"[",
"'feature'",
"]",
".",
"append",
"(",
"'Catalytic RNA'",
")",
"gff",
"[",
"'start'",
"]",
".",
"append",
"(",
"Start",
")",
"gff",
"[",
"'end'",
"]",
".",
"append",
"(",
"End",
")",
"gff",
"[",
"'score'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'strand'",
"]",
".",
"append",
"(",
"strand",
")",
"gff",
"[",
"'frame'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'attribute'",
"]",
".",
"append",
"(",
"'ID=%s; Name=%s'",
"%",
"(",
"ID",
",",
"annot",
")",
")",
"return",
"gff"
] | parse catalytic RNAs to gff format | [
"parse",
"catalytic",
"RNAs",
"to",
"gff",
"format"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L13-L40 | train |
christophertbrown/bioscripts | ctbBio/rRNA_insertions_gff.py | parse_orf | def parse_orf(insertion, gff):
"""
parse ORF to gff format
"""
offset = insertion['offset']
if type(insertion['orf']) is not str:
return gff
for orf in parse_fasta(insertion['orf'].split('|')):
ID = orf[0].split('>')[1].split()[0]
Start, End, strand = [int(i) for i in orf[0].split(' # ')[1:4]]
if strand == 1:
strand = '+'
else:
strand = '-'
GeneStrand = insertion['strand']
if strand != GeneStrand:
if strand == '+':
strand = '-'
else:
strand = '+'
Start, End = End - 2, Start - 2
Start, End = abs(Start + offset) - 1, abs(End + offset) - 1
annot = orf[0].split()[1]
if annot == 'n/a':
annot = 'unknown'
gff['#seqname'].append(insertion['ID'])
gff['source'].append('Prodigal and Pfam')
gff['feature'].append('CDS')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('ID=%s; Name=%s' % (ID, annot))
return gff | python | def parse_orf(insertion, gff):
"""
parse ORF to gff format
"""
offset = insertion['offset']
if type(insertion['orf']) is not str:
return gff
for orf in parse_fasta(insertion['orf'].split('|')):
ID = orf[0].split('>')[1].split()[0]
Start, End, strand = [int(i) for i in orf[0].split(' # ')[1:4]]
if strand == 1:
strand = '+'
else:
strand = '-'
GeneStrand = insertion['strand']
if strand != GeneStrand:
if strand == '+':
strand = '-'
else:
strand = '+'
Start, End = End - 2, Start - 2
Start, End = abs(Start + offset) - 1, abs(End + offset) - 1
annot = orf[0].split()[1]
if annot == 'n/a':
annot = 'unknown'
gff['#seqname'].append(insertion['ID'])
gff['source'].append('Prodigal and Pfam')
gff['feature'].append('CDS')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('ID=%s; Name=%s' % (ID, annot))
return gff | [
"def",
"parse_orf",
"(",
"insertion",
",",
"gff",
")",
":",
"offset",
"=",
"insertion",
"[",
"'offset'",
"]",
"if",
"type",
"(",
"insertion",
"[",
"'orf'",
"]",
")",
"is",
"not",
"str",
":",
"return",
"gff",
"for",
"orf",
"in",
"parse_fasta",
"(",
"insertion",
"[",
"'orf'",
"]",
".",
"split",
"(",
"'|'",
")",
")",
":",
"ID",
"=",
"orf",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"Start",
",",
"End",
",",
"strand",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"orf",
"[",
"0",
"]",
".",
"split",
"(",
"' # '",
")",
"[",
"1",
":",
"4",
"]",
"]",
"if",
"strand",
"==",
"1",
":",
"strand",
"=",
"'+'",
"else",
":",
"strand",
"=",
"'-'",
"GeneStrand",
"=",
"insertion",
"[",
"'strand'",
"]",
"if",
"strand",
"!=",
"GeneStrand",
":",
"if",
"strand",
"==",
"'+'",
":",
"strand",
"=",
"'-'",
"else",
":",
"strand",
"=",
"'+'",
"Start",
",",
"End",
"=",
"End",
"-",
"2",
",",
"Start",
"-",
"2",
"Start",
",",
"End",
"=",
"abs",
"(",
"Start",
"+",
"offset",
")",
"-",
"1",
",",
"abs",
"(",
"End",
"+",
"offset",
")",
"-",
"1",
"annot",
"=",
"orf",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"1",
"]",
"if",
"annot",
"==",
"'n/a'",
":",
"annot",
"=",
"'unknown'",
"gff",
"[",
"'#seqname'",
"]",
".",
"append",
"(",
"insertion",
"[",
"'ID'",
"]",
")",
"gff",
"[",
"'source'",
"]",
".",
"append",
"(",
"'Prodigal and Pfam'",
")",
"gff",
"[",
"'feature'",
"]",
".",
"append",
"(",
"'CDS'",
")",
"gff",
"[",
"'start'",
"]",
".",
"append",
"(",
"Start",
")",
"gff",
"[",
"'end'",
"]",
".",
"append",
"(",
"End",
")",
"gff",
"[",
"'score'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'strand'",
"]",
".",
"append",
"(",
"strand",
")",
"gff",
"[",
"'frame'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'attribute'",
"]",
".",
"append",
"(",
"'ID=%s; Name=%s'",
"%",
"(",
"ID",
",",
"annot",
")",
")",
"return",
"gff"
] | parse ORF to gff format | [
"parse",
"ORF",
"to",
"gff",
"format"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L42-L76 | train |
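A worked example of the strand-flip and offset arithmetic shared by the parse_* functions above (all values invented): a '-' strand gene occupying contig positions 100-250 gets offset = -250 (see iTable2GFF below), so a feature at gene positions 10-30 maps back to contig coordinates like this:

Start, End = 10, 30
offset = -250
Start, End = End - 2, Start - 2                      # flip for the opposite strand
Start, End = abs(Start + offset) - 1, abs(End + offset) - 1
print(Start, End)  # -> 221 241 (contig coordinates)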
christophertbrown/bioscripts | ctbBio/rRNA_insertions_gff.py | parse_insertion | def parse_insertion(insertion, gff):
"""
parse insertion to gff format
"""
offset = insertion['offset']
for ins in parse_fasta(insertion['insertion sequence'].split('|')):
strand = insertion['strand']
ID = ins[0].split('>')[1].split()[0]
Start, End = [int(i) for i in ins[0].split('gene-pos=', 1)[1].split()[0].split('-')]
Start, End = abs(Start + offset), abs(End + offset)
if strand == '-':
Start, End = End, Start
gff['#seqname'].append(insertion['ID'])
gff['source'].append(insertion['source'])
gff['feature'].append('IVS')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand) # same as rRNA
gff['frame'].append('.')
gff['attribute'].append('ID=%s' % (ID))
return gff | python | def parse_insertion(insertion, gff):
"""
parse insertion to gff format
"""
offset = insertion['offset']
for ins in parse_fasta(insertion['insertion sequence'].split('|')):
strand = insertion['strand']
ID = ins[0].split('>')[1].split()[0]
Start, End = [int(i) for i in ins[0].split('gene-pos=', 1)[1].split()[0].split('-')]
Start, End = abs(Start + offset), abs(End + offset)
if strand == '-':
Start, End = End, Start
gff['#seqname'].append(insertion['ID'])
gff['source'].append(insertion['source'])
gff['feature'].append('IVS')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand) # same as rRNA
gff['frame'].append('.')
gff['attribute'].append('ID=%s' % (ID))
return gff | [
"def",
"parse_insertion",
"(",
"insertion",
",",
"gff",
")",
":",
"offset",
"=",
"insertion",
"[",
"'offset'",
"]",
"for",
"ins",
"in",
"parse_fasta",
"(",
"insertion",
"[",
"'insertion sequence'",
"]",
".",
"split",
"(",
"'|'",
")",
")",
":",
"strand",
"=",
"insertion",
"[",
"'strand'",
"]",
"ID",
"=",
"ins",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"Start",
",",
"End",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"ins",
"[",
"0",
"]",
".",
"split",
"(",
"'gene-pos='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
"]",
"Start",
",",
"End",
"=",
"abs",
"(",
"Start",
"+",
"offset",
")",
",",
"abs",
"(",
"End",
"+",
"offset",
")",
"if",
"strand",
"==",
"'-'",
":",
"Start",
",",
"End",
"=",
"End",
",",
"Start",
"gff",
"[",
"'#seqname'",
"]",
".",
"append",
"(",
"insertion",
"[",
"'ID'",
"]",
")",
"gff",
"[",
"'source'",
"]",
".",
"append",
"(",
"insertion",
"[",
"'source'",
"]",
")",
"gff",
"[",
"'feature'",
"]",
".",
"append",
"(",
"'IVS'",
")",
"gff",
"[",
"'start'",
"]",
".",
"append",
"(",
"Start",
")",
"gff",
"[",
"'end'",
"]",
".",
"append",
"(",
"End",
")",
"gff",
"[",
"'score'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'strand'",
"]",
".",
"append",
"(",
"strand",
")",
"# same as rRNA",
"gff",
"[",
"'frame'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'attribute'",
"]",
".",
"append",
"(",
"'ID=%s'",
"%",
"(",
"ID",
")",
")",
"return",
"gff"
] | parse insertion to gff format | [
"parse",
"insertion",
"to",
"gff",
"format"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L78-L99 | train |
christophertbrown/bioscripts | ctbBio/rRNA_insertions_gff.py | parse_rRNA | def parse_rRNA(insertion, seq, gff):
"""
parse rRNA to gff format
"""
offset = insertion['offset']
strand = insertion['strand']
for rRNA in parse_masked(seq, 0)[0]:
rRNA = ''.join(rRNA)
Start = seq[1].find(rRNA) + 1
End = Start + len(rRNA) - 1
if strand == '-':
Start, End = End - 2, Start - 2
pos = (abs(Start + offset) - 1, abs(End + offset) - 1)
Start, End = min(pos), max(pos)
source = insertion['source']
annot = '%s rRNA' % (source.split('from', 1)[0])
gff['#seqname'].append(insertion['ID'])
gff['source'].append(source)
gff['feature'].append('rRNA')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('Name=%s' % (annot))
return gff | python | def parse_rRNA(insertion, seq, gff):
"""
parse rRNA to gff format
"""
offset = insertion['offset']
strand = insertion['strand']
for rRNA in parse_masked(seq, 0)[0]:
rRNA = ''.join(rRNA)
Start = seq[1].find(rRNA) + 1
End = Start + len(rRNA) - 1
if strand == '-':
Start, End = End - 2, Start - 2
pos = (abs(Start + offset) - 1, abs(End + offset) - 1)
Start, End = min(pos), max(pos)
source = insertion['source']
annot = '%s rRNA' % (source.split('from', 1)[0])
gff['#seqname'].append(insertion['ID'])
gff['source'].append(source)
gff['feature'].append('rRNA')
gff['start'].append(Start)
gff['end'].append(End)
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('Name=%s' % (annot))
return gff | [
"def",
"parse_rRNA",
"(",
"insertion",
",",
"seq",
",",
"gff",
")",
":",
"offset",
"=",
"insertion",
"[",
"'offset'",
"]",
"strand",
"=",
"insertion",
"[",
"'strand'",
"]",
"for",
"rRNA",
"in",
"parse_masked",
"(",
"seq",
",",
"0",
")",
"[",
"0",
"]",
":",
"rRNA",
"=",
"''",
".",
"join",
"(",
"rRNA",
")",
"Start",
"=",
"seq",
"[",
"1",
"]",
".",
"find",
"(",
"rRNA",
")",
"+",
"1",
"End",
"=",
"Start",
"+",
"len",
"(",
"rRNA",
")",
"-",
"1",
"if",
"strand",
"==",
"'-'",
":",
"Start",
",",
"End",
"=",
"End",
"-",
"2",
",",
"Start",
"-",
"2",
"pos",
"=",
"(",
"abs",
"(",
"Start",
"+",
"offset",
")",
"-",
"1",
",",
"abs",
"(",
"End",
"+",
"offset",
")",
"-",
"1",
")",
"Start",
",",
"End",
"=",
"min",
"(",
"pos",
")",
",",
"max",
"(",
"pos",
")",
"source",
"=",
"insertion",
"[",
"'source'",
"]",
"annot",
"=",
"'%s rRNA'",
"%",
"(",
"source",
".",
"split",
"(",
"'from'",
",",
"1",
")",
"[",
"0",
"]",
")",
"gff",
"[",
"'#seqname'",
"]",
".",
"append",
"(",
"insertion",
"[",
"'ID'",
"]",
")",
"gff",
"[",
"'source'",
"]",
".",
"append",
"(",
"source",
")",
"gff",
"[",
"'feature'",
"]",
".",
"append",
"(",
"'rRNA'",
")",
"gff",
"[",
"'start'",
"]",
".",
"append",
"(",
"Start",
")",
"gff",
"[",
"'end'",
"]",
".",
"append",
"(",
"End",
")",
"gff",
"[",
"'score'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'strand'",
"]",
".",
"append",
"(",
"strand",
")",
"gff",
"[",
"'frame'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'attribute'",
"]",
".",
"append",
"(",
"'Name=%s'",
"%",
"(",
"annot",
")",
")",
"return",
"gff"
] | parse rRNA to gff format | [
"parse",
"rRNA",
"to",
"gff",
"format"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L122-L147 | train |
christophertbrown/bioscripts | ctbBio/rRNA_insertions_gff.py | iTable2GFF | def iTable2GFF(iTable, fa, contig = False):
"""
convert iTable to gff file
"""
columns = ['#seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']
gff = {c:[] for c in columns}
for insertion in iTable.iterrows():
insertion = insertion[1]
if insertion['ID'] not in fa:
continue
# rRNA strand
strand = insertion['sequence'].split('strand=', 1)[1].split()[0]
# set rRNA positions for reporting features on contig or extracted sequence
if contig is True:
gene = [int(i) for i in insertion['sequence'].split('pos=', 1)[1].split()[0].split('-')]
if strand == '-':
offset = -1 * (gene[1])
else:
offset = gene[0]
else:
strand = '+'
gene = [1, int(insertion['sequence'].split('total-len=', 1)[1].split()[0])]
offset = gene[0]
insertion['strand'] = strand
insertion['offset'] = offset
# source for prediction
source = insertion['sequence'].split('::model', 1)[0].rsplit(' ', 1)[-1]
insertion['source'] = source
# rRNA gene
geneAnnot = '%s rRNA gene' % (source.split('from', 1)[0])
geneNum = insertion['sequence'].split('seq=', 1)[1].split()[0]
gff['#seqname'].append(insertion['ID'])
gff['source'].append(source)
gff['feature'].append('Gene')
gff['start'].append(gene[0])
gff['end'].append(gene[1])
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('ID=%s; Name=%s' % (geneNum, geneAnnot))
# rRNA
gff = parse_rRNA(insertion, fa[insertion['ID']], gff)
# insertions
gff = parse_insertion(insertion, gff)
# orfs
gff = parse_orf(insertion, gff)
# catalytic RNAs
gff = parse_catalytic(insertion, gff)
return pd.DataFrame(gff)[columns].drop_duplicates() | python | def iTable2GFF(iTable, fa, contig = False):
"""
convert iTable to gff file
"""
columns = ['#seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']
gff = {c:[] for c in columns}
for insertion in iTable.iterrows():
insertion = insertion[1]
if insertion['ID'] not in fa:
continue
# rRNA strand
strand = insertion['sequence'].split('strand=', 1)[1].split()[0]
# set rRNA positions for reporting features on contig or extracted sequence
if contig is True:
gene = [int(i) for i in insertion['sequence'].split('pos=', 1)[1].split()[0].split('-')]
if strand == '-':
offset = -1 * (gene[1])
else:
offset = gene[0]
else:
strand = '+'
gene = [1, int(insertion['sequence'].split('total-len=', 1)[1].split()[0])]
offset = gene[0]
insertion['strand'] = strand
insertion['offset'] = offset
# source for prediction
source = insertion['sequence'].split('::model', 1)[0].rsplit(' ', 1)[-1]
insertion['source'] = source
# rRNA gene
geneAnnot = '%s rRNA gene' % (source.split('from', 1)[0])
geneNum = insertion['sequence'].split('seq=', 1)[1].split()[0]
gff['#seqname'].append(insertion['ID'])
gff['source'].append(source)
gff['feature'].append('Gene')
gff['start'].append(gene[0])
gff['end'].append(gene[1])
gff['score'].append('.')
gff['strand'].append(strand)
gff['frame'].append('.')
gff['attribute'].append('ID=%s; Name=%s' % (geneNum, geneAnnot))
# rRNA
gff = parse_rRNA(insertion, fa[insertion['ID']], gff)
# insertions
gff = parse_insertion(insertion, gff)
# orfs
gff = parse_orf(insertion, gff)
# catalytic RNAs
gff = parse_catalytic(insertion, gff)
return pd.DataFrame(gff)[columns].drop_duplicates() | [
"def",
"iTable2GFF",
"(",
"iTable",
",",
"fa",
",",
"contig",
"=",
"False",
")",
":",
"columns",
"=",
"[",
"'#seqname'",
",",
"'source'",
",",
"'feature'",
",",
"'start'",
",",
"'end'",
",",
"'score'",
",",
"'strand'",
",",
"'frame'",
",",
"'attribute'",
"]",
"gff",
"=",
"{",
"c",
":",
"[",
"]",
"for",
"c",
"in",
"columns",
"}",
"for",
"insertion",
"in",
"iTable",
".",
"iterrows",
"(",
")",
":",
"insertion",
"=",
"insertion",
"[",
"1",
"]",
"if",
"insertion",
"[",
"'ID'",
"]",
"not",
"in",
"fa",
":",
"continue",
"# rRNA strand",
"strand",
"=",
"insertion",
"[",
"'sequence'",
"]",
".",
"split",
"(",
"'strand='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"# set rRNA positions for reporting features on contig or extracted sequence",
"if",
"contig",
"is",
"True",
":",
"gene",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"insertion",
"[",
"'sequence'",
"]",
".",
"split",
"(",
"'pos='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'-'",
")",
"]",
"if",
"strand",
"==",
"'-'",
":",
"offset",
"=",
"-",
"1",
"*",
"(",
"gene",
"[",
"1",
"]",
")",
"else",
":",
"offset",
"=",
"gene",
"[",
"0",
"]",
"else",
":",
"strand",
"=",
"'+'",
"gene",
"=",
"[",
"1",
",",
"int",
"(",
"insertion",
"[",
"'sequence'",
"]",
".",
"split",
"(",
"'total-len='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
")",
"]",
"offset",
"=",
"gene",
"[",
"0",
"]",
"insertion",
"[",
"'strand'",
"]",
"=",
"strand",
"insertion",
"[",
"'offset'",
"]",
"=",
"offset",
"# source for prediction",
"source",
"=",
"insertion",
"[",
"'sequence'",
"]",
".",
"split",
"(",
"'::model'",
",",
"1",
")",
"[",
"0",
"]",
".",
"rsplit",
"(",
"' '",
",",
"1",
")",
"[",
"-",
"1",
"]",
"insertion",
"[",
"'source'",
"]",
"=",
"source",
"# rRNA gene",
"geneAnnot",
"=",
"'%s rRNA gene'",
"%",
"(",
"source",
".",
"split",
"(",
"'from'",
",",
"1",
")",
"[",
"0",
"]",
")",
"geneNum",
"=",
"insertion",
"[",
"'sequence'",
"]",
".",
"split",
"(",
"'seq='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"gff",
"[",
"'#seqname'",
"]",
".",
"append",
"(",
"insertion",
"[",
"'ID'",
"]",
")",
"gff",
"[",
"'source'",
"]",
".",
"append",
"(",
"source",
")",
"gff",
"[",
"'feature'",
"]",
".",
"append",
"(",
"'Gene'",
")",
"gff",
"[",
"'start'",
"]",
".",
"append",
"(",
"gene",
"[",
"0",
"]",
")",
"gff",
"[",
"'end'",
"]",
".",
"append",
"(",
"gene",
"[",
"1",
"]",
")",
"gff",
"[",
"'score'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'strand'",
"]",
".",
"append",
"(",
"strand",
")",
"gff",
"[",
"'frame'",
"]",
".",
"append",
"(",
"'.'",
")",
"gff",
"[",
"'attribute'",
"]",
".",
"append",
"(",
"'ID=%s; Name=%s'",
"%",
"(",
"geneNum",
",",
"geneAnnot",
")",
")",
"# rRNA",
"gff",
"=",
"parse_rRNA",
"(",
"insertion",
",",
"fa",
"[",
"insertion",
"[",
"'ID'",
"]",
"]",
",",
"gff",
")",
"# insertions",
"gff",
"=",
"parse_insertion",
"(",
"insertion",
",",
"gff",
")",
"# orfs",
"gff",
"=",
"parse_orf",
"(",
"insertion",
",",
"gff",
")",
"# catalytic RNAs",
"gff",
"=",
"parse_catalytic",
"(",
"insertion",
",",
"gff",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"gff",
")",
"[",
"columns",
"]",
".",
"drop_duplicates",
"(",
")"
] | convert iTable to gff file | [
"convert",
"iTable",
"to",
"gff",
"file"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/rRNA_insertions_gff.py#L149-L197 | train |
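A minimal, self-contained sketch of the dict-of-columns to GFF table step used throughout this module (the row values are invented):

import pandas as pd

columns = ['#seqname', 'source', 'feature', 'start', 'end',
           'score', 'strand', 'frame', 'attribute']
gff = {c: [] for c in columns}
for c, v in zip(columns, ['scaffold_1', 'demo', 'Gene', 1, 1500,
                          '.', '+', '.', 'ID=1; Name=16S rRNA gene']):
    gff[c].append(v)
pd.DataFrame(gff)[columns].drop_duplicates().to_csv(
    'out.gff', sep='\t', index=False)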
smdabdoub/phylotoast | bin/biom_phyla_summary.py | summarize_taxa | def summarize_taxa(biom):
"""
Given an abundance table, group the counts by every
taxonomic level.
"""
tamtcounts = defaultdict(int)
tot_seqs = 0.0
for row, col, amt in biom['data']:
tot_seqs += amt
rtax = biom['rows'][row]['metadata']['taxonomy']
for i, t in enumerate(rtax):
t = t.strip()
if i == len(rtax)-1 and len(t) > 3 and len(rtax[-1]) > 3:
t = 's__'+rtax[i-1].strip().split('_')[-1]+'_'+t.split('_')[-1]
tamtcounts[t] += amt
lvlData = {lvl: levelData(tamtcounts, tot_seqs, lvl) for lvl in ['k', 'p', 'c', 'o', 'f', 'g', 's']}
return tot_seqs, lvlData | python | def summarize_taxa(biom):
"""
Given an abundance table, group the counts by every
taxonomic level.
"""
tamtcounts = defaultdict(int)
tot_seqs = 0.0
for row, col, amt in biom['data']:
tot_seqs += amt
rtax = biom['rows'][row]['metadata']['taxonomy']
for i, t in enumerate(rtax):
t = t.strip()
if i == len(rtax)-1 and len(t) > 3 and len(rtax[-1]) > 3:
t = 's__'+rtax[i-1].strip().split('_')[-1]+'_'+t.split('_')[-1]
tamtcounts[t] += amt
lvlData = {lvl: levelData(tamtcounts, tot_seqs, lvl) for lvl in ['k', 'p', 'c', 'o', 'f', 'g', 's']}
return tot_seqs, lvlData | [
"def",
"summarize_taxa",
"(",
"biom",
")",
":",
"tamtcounts",
"=",
"defaultdict",
"(",
"int",
")",
"tot_seqs",
"=",
"0.0",
"for",
"row",
",",
"col",
",",
"amt",
"in",
"biom",
"[",
"'data'",
"]",
":",
"tot_seqs",
"+=",
"amt",
"rtax",
"=",
"biom",
"[",
"'rows'",
"]",
"[",
"row",
"]",
"[",
"'metadata'",
"]",
"[",
"'taxonomy'",
"]",
"for",
"i",
",",
"t",
"in",
"enumerate",
"(",
"rtax",
")",
":",
"t",
"=",
"t",
".",
"strip",
"(",
")",
"if",
"i",
"==",
"len",
"(",
"rtax",
")",
"-",
"1",
"and",
"len",
"(",
"t",
")",
">",
"3",
"and",
"len",
"(",
"rtax",
"[",
"-",
"1",
"]",
")",
">",
"3",
":",
"t",
"=",
"'s__'",
"+",
"rtax",
"[",
"i",
"-",
"1",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
"+",
"'_'",
"+",
"t",
".",
"split",
"(",
"'_'",
")",
"[",
"-",
"1",
"]",
"tamtcounts",
"[",
"t",
"]",
"+=",
"amt",
"lvlData",
"=",
"{",
"lvl",
":",
"levelData",
"(",
"tamtcounts",
",",
"tot_seqs",
",",
"lvl",
")",
"for",
"lvl",
"in",
"[",
"'k'",
",",
"'p'",
",",
"'c'",
",",
"'o'",
",",
"'f'",
",",
"'g'",
",",
"'s'",
"]",
"}",
"return",
"tot_seqs",
",",
"lvlData"
] | Given an abundance table, group the counts by every
taxonomic level. | [
"Given",
"an",
"abundance",
"table",
"group",
"the",
"counts",
"by",
"every",
"taxonomic",
"level",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/biom_phyla_summary.py#L27-L46 | train |
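A toy version of the per-level counting above (lineage strings and counts invented); each rank prefix accumulates the abundance of every observation below it:

from collections import defaultdict

tamtcounts = defaultdict(int)
observations = [(['k__Bacteria', 'p__Firmicutes'], 5),
                (['k__Bacteria', 'p__Proteobacteria'], 3)]
for lineage, amt in observations:
    for t in lineage:
        tamtcounts[t.strip()] += amt
print(tamtcounts['k__Bacteria'], tamtcounts['p__Firmicutes'])  # -> 8 5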
scottrice/pysteam | pysteam/legacy/game.py | Game.custom_image | def custom_image(self, user):
"""Returns the path to the custom image set for this game, or None if
no image is set"""
for ext in self.valid_custom_image_extensions():
image_location = self._custom_image_path(user, ext)
if os.path.isfile(image_location):
return image_location
return None | python | def custom_image(self, user):
"""Returns the path to the custom image set for this game, or None if
no image is set"""
for ext in self.valid_custom_image_extensions():
image_location = self._custom_image_path(user, ext)
if os.path.isfile(image_location):
return image_location
return None | [
"def",
"custom_image",
"(",
"self",
",",
"user",
")",
":",
"for",
"ext",
"in",
"self",
".",
"valid_custom_image_extensions",
"(",
")",
":",
"image_location",
"=",
"self",
".",
"_custom_image_path",
"(",
"user",
",",
"ext",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"image_location",
")",
":",
"return",
"image_location",
"return",
"None"
] | Returns the path to the custom image set for this game, or None if
no image is set | [
"Returns",
"the",
"path",
"to",
"the",
"custom",
"image",
"set",
"for",
"this",
"game",
"or",
"None",
"if",
"no",
"image",
"is",
"set"
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/game.py#L41-L48 | train |
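The probe-for-the-first-existing-file pattern used above, as a standalone sketch; the extension list is a guess at typical grid-image formats, not taken from valid_custom_image_extensions:

import os

def first_existing(base, exts=('.png', '.jpg', '.jpeg', '.tga')):
    for ext in exts:
        candidate = base + ext
        if os.path.isfile(candidate):
            return candidate
    return None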
scottrice/pysteam | pysteam/legacy/game.py | Game.set_image | def set_image(self, user, image_path):
"""Sets a custom image for the game. `image_path` should refer to
an image file on disk"""
_, ext = os.path.splitext(image_path)
shutil.copy(image_path, self._custom_image_path(user, ext)) | python | def set_image(self, user, image_path):
"""Sets a custom image for the game. `image_path` should refer to
an image file on disk"""
_, ext = os.path.splitext(image_path)
shutil.copy(image_path, self._custom_image_path(user, ext)) | [
"def",
"set_image",
"(",
"self",
",",
"user",
",",
"image_path",
")",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"image_path",
")",
"shutil",
".",
"copy",
"(",
"image_path",
",",
"self",
".",
"_custom_image_path",
"(",
"user",
",",
"ext",
")",
")"
] | Sets a custom image for the game. `image_path` should refer to
an image file on disk | [
"Sets",
"a",
"custom",
"image",
"for",
"the",
"game",
".",
"image_path",
"should",
"refer",
"to",
"an",
"image",
"file",
"on",
"disk"
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/legacy/game.py#L50-L54 | train |
christophertbrown/bioscripts | ctbBio/filter_fastq_sam.py | sam_list | def sam_list(sam):
"""
get a list of mapped reads
"""
list = []
for file in sam:
for line in file:
if line.startswith('@') is False:
line = line.strip().split()
id, map = line[0], int(line[1])
if map != 4 and map != 8:
list.append(id)
return set(list) | python | def sam_list(sam):
"""
get a list of mapped reads
"""
list = []
for file in sam:
for line in file:
if line.startswith('@') is False:
line = line.strip().split()
id, map = line[0], int(line[1])
if map != 4 and map != 8:
list.append(id)
return set(list) | [
"def",
"sam_list",
"(",
"sam",
")",
":",
"list",
"=",
"[",
"]",
"for",
"file",
"in",
"sam",
":",
"for",
"line",
"in",
"file",
":",
"if",
"line",
".",
"startswith",
"(",
"'@'",
")",
"is",
"False",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"id",
",",
"map",
"=",
"line",
"[",
"0",
"]",
",",
"int",
"(",
"line",
"[",
"1",
"]",
")",
"if",
"map",
"!=",
"4",
"and",
"map",
"!=",
"8",
":",
"list",
".",
"append",
"(",
"id",
")",
"return",
"set",
"(",
"list",
")"
] | get a list of mapped reads | [
"get",
"a",
"list",
"of",
"mapped",
"reads"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/filter_fastq_sam.py#L7-L19 | train |
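For context, the SAM FLAG field is a bitmask: bit 0x4 means the read is unmapped and 0x8 means its mate is unmapped. The equality test above handles reads whose flag is exactly 4 or 8; the general form is a bitwise check:

def is_mapped(flag):
    return not (int(flag) & 0x4)  # 0x4 set -> segment unmapped

print(is_mapped(0), is_mapped(4), is_mapped(83))  # -> True False True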
christophertbrown/bioscripts | ctbBio/filter_fastq_sam.py | sam_list_paired | def sam_list_paired(sam):
"""
get a list of mapped reads
require that both pairs are mapped in the sam file in order to remove the reads
"""
list = []
pair = ['1', '2']
prev = ''
for file in sam:
for line in file:
if line.startswith('@') is False:
line = line.strip().split()
id, map = line[0], int(line[1])
if map != 4 and map != 8:
read = id.rsplit('/')[0]
if read == prev:
list.append(read)
prev = read
return set(list) | python | def sam_list_paired(sam):
"""
get a list of mapped reads
require that both pairs are mapped in the sam file in order to remove the reads
"""
list = []
pair = ['1', '2']
prev = ''
for file in sam:
for line in file:
if line.startswith('@') is False:
line = line.strip().split()
id, map = line[0], int(line[1])
if map != 4 and map != 8:
read = id.rsplit('/')[0]
if read == prev:
list.append(read)
prev = read
return set(list) | [
"def",
"sam_list_paired",
"(",
"sam",
")",
":",
"list",
"=",
"[",
"]",
"pair",
"=",
"[",
"'1'",
",",
"'2'",
"]",
"prev",
"=",
"''",
"for",
"file",
"in",
"sam",
":",
"for",
"line",
"in",
"file",
":",
"if",
"line",
".",
"startswith",
"(",
"'@'",
")",
"is",
"False",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"id",
",",
"map",
"=",
"line",
"[",
"0",
"]",
",",
"int",
"(",
"line",
"[",
"1",
"]",
")",
"if",
"map",
"!=",
"4",
"and",
"map",
"!=",
"8",
":",
"read",
"=",
"id",
".",
"rsplit",
"(",
"'/'",
")",
"[",
"0",
"]",
"if",
"read",
"==",
"prev",
":",
"list",
".",
"append",
"(",
"read",
")",
"prev",
"=",
"read",
"return",
"set",
"(",
"list",
")"
] | get a list of mapped reads
require that both pairs are mapped in the sam file in order to remove the reads | [
"get",
"a",
"list",
"of",
"mapped",
"reads",
"require",
"that",
"both",
"pairs",
"are",
"mapped",
"in",
"the",
"sam",
"file",
"in",
"order",
"to",
"remove",
"the",
"reads"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/filter_fastq_sam.py#L21-L39 | train |
christophertbrown/bioscripts | ctbBio/filter_fastq_sam.py | filter_paired | def filter_paired(list):
"""
require that both pairs are mapped in the sam file in order to remove the reads
"""
pairs = {}
filtered = []
for id in list:
read = id.rsplit('/')[0]
if read not in pairs:
pairs[read] = []
pairs[read].append(id)
for read in pairs:
ids = pairs[read]
if len(ids) == 2:
filtered.extend(ids)
return set(filtered) | python | def filter_paired(list):
"""
require that both pairs are mapped in the sam file in order to remove the reads
"""
pairs = {}
filtered = []
for id in list:
read = id.rsplit('/')[0]
if read not in pairs:
pairs[read] = []
pairs[read].append(id)
for read in pairs:
ids = pairs[read]
if len(ids) == 2:
filtered.extend(ids)
return set(filtered) | [
"def",
"filter_paired",
"(",
"list",
")",
":",
"pairs",
"=",
"{",
"}",
"filtered",
"=",
"[",
"]",
"for",
"id",
"in",
"list",
":",
"read",
"=",
"id",
".",
"rsplit",
"(",
"'/'",
")",
"[",
"0",
"]",
"if",
"read",
"not",
"in",
"pairs",
":",
"pairs",
"[",
"read",
"]",
"=",
"[",
"]",
"pairs",
"[",
"read",
"]",
".",
"append",
"(",
"id",
")",
"for",
"read",
"in",
"pairs",
":",
"ids",
"=",
"pairs",
"[",
"read",
"]",
"if",
"len",
"(",
"ids",
")",
"==",
"2",
":",
"filtered",
".",
"extend",
"(",
"ids",
")",
"return",
"set",
"(",
"filtered",
")"
] | require that both pairs are mapped in the sam file in order to remove the reads | [
"require",
"that",
"both",
"pairs",
"are",
"mapped",
"in",
"the",
"sam",
"file",
"in",
"order",
"to",
"remove",
"the",
"reads"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/filter_fastq_sam.py#L41-L56 | train |
christophertbrown/bioscripts | ctbBio/mapped.py | sam2fastq | def sam2fastq(line):
"""
print fastq from sam
"""
fastq = []
fastq.append('@%s' % line[0])
fastq.append(line[9])
fastq.append('+%s' % line[0])
fastq.append(line[10])
return fastq | python | def sam2fastq(line):
"""
print fastq from sam
"""
fastq = []
fastq.append('@%s' % line[0])
fastq.append(line[9])
fastq.append('+%s' % line[0])
fastq.append(line[10])
return fastq | [
"def",
"sam2fastq",
"(",
"line",
")",
":",
"fastq",
"=",
"[",
"]",
"fastq",
".",
"append",
"(",
"'@%s'",
"%",
"line",
"[",
"0",
"]",
")",
"fastq",
".",
"append",
"(",
"line",
"[",
"9",
"]",
")",
"fastq",
".",
"append",
"(",
"'+%s'",
"%",
"line",
"[",
"0",
"]",
")",
"fastq",
".",
"append",
"(",
"line",
"[",
"10",
"]",
")",
"return",
"fastq"
] | print fastq from sam | [
"print",
"fastq",
"from",
"sam"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/mapped.py#L13-L22 | train |
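A quick check of the conversion with a minimal, invented SAM line (the 11 mandatory fields; SEQ is column 10 and QUAL column 11, i.e. indices 9 and 10):

line = 'read1\t0\tref\t1\t60\t4M\t*\t0\t0\tACGT\tIIII'.split('\t')
print('\n'.join(sam2fastq(line)))
# @read1
# ACGT
# +read1
# IIII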
christophertbrown/bioscripts | ctbBio/mapped.py | check_mismatches | def check_mismatches(read, pair, mismatches, mm_option, req_map):
"""
- check to see if the read maps with <= threshold number of mismatches
- mm_option = 'one' or 'both' depending on whether or not one or both reads
in a pair need to pass the mismatch threshold
- pair can be False if read does not have a pair
- make sure alignment score is not 0, which would indicate that the read was not aligned to the reference
"""
# if read is not paired, make sure it is mapped and that mm <= thresh
if pair is False:
mm = count_mismatches(read)
if mm is False:
return False
# if no threshold is supplied, return True
if mismatches is False:
return True
# passes threshold?
if mm <= mismatches:
return True
# paired reads
r_mm = count_mismatches(read)
p_mm = count_mismatches(pair)
# if neither read is mapped, return False
if r_mm is False and p_mm is False:
return False
# if no threshold, return True
if mismatches is False:
return True
# if req_map is True, both reads have to map
if req_map is True:
if r_mm is False or p_mm is False:
return False
## if option is 'one,' only one read has to pass threshold
if mm_option == 'one':
if (r_mm is not False and r_mm <= mismatches) or (p_mm is not False and p_mm <= mismatches):
return True
## if option is 'both,' both reads have to pass threshold
if mm_option == 'both':
## if one read in pair does not map to the scaffold,
## make sure the other read passes threshold
if r_mm is False:
if p_mm <= mismatches:
return True
elif p_mm is False:
if r_mm <= mismatches:
return True
elif (r_mm is not False and r_mm <= mismatches) and (p_mm is not False and p_mm <= mismatches):
return True
return False | python | def check_mismatches(read, pair, mismatches, mm_option, req_map):
"""
- check to see if the read maps with <= threshold number of mismatches
- mm_option = 'one' or 'both' depending on whether or not one or both reads
in a pair need to pass the mismatch threshold
- pair can be False if read does not have a pair
- make sure alignment score is not 0, which would indicate that the read was not aligned to the reference
"""
# if read is not paired, make sure it is mapped and that mm <= thresh
if pair is False:
mm = count_mismatches(read)
if mm is False:
return False
# if no threshold is supplied, return True
if mismatches is False:
return True
# passes threshold?
if mm <= mismatches:
return True
# paired reads
r_mm = count_mismatches(read)
p_mm = count_mismatches(pair)
# if neither read is mapped, return False
if r_mm is False and p_mm is False:
return False
# if no threshold, return True
if mismatches is False:
return True
# if req_map is True, both reads have to map
if req_map is True:
if r_mm is False or p_mm is False:
return False
## if option is 'one,' only one read has to pass threshold
if mm_option == 'one':
if (r_mm is not False and r_mm <= mismatches) or (p_mm is not False and p_mm <= mismatches):
return True
## if option is 'both,' both reads have to pass threshold
if mm_option == 'both':
## if one read in pair does not map to the scaffold,
## make sure the other read passes threshold
if r_mm is False:
if p_mm <= mismatches:
return True
elif p_mm is False:
if r_mm <= mismatches:
return True
elif (r_mm is not False and r_mm <= mismatches) and (p_mm is not False and p_mm <= mismatches):
return True
return False | [
"def",
"check_mismatches",
"(",
"read",
",",
"pair",
",",
"mismatches",
",",
"mm_option",
",",
"req_map",
")",
":",
"# if read is not paired, make sure it is mapped and that mm <= thresh",
"if",
"pair",
"is",
"False",
":",
"mm",
"=",
"count_mismatches",
"(",
"read",
")",
"if",
"mm",
"is",
"False",
":",
"return",
"False",
"# if no threshold is supplied, return True",
"if",
"mismatches",
"is",
"False",
":",
"return",
"True",
"# passes threshold?",
"if",
"mm",
"<=",
"mismatches",
":",
"return",
"True",
"# paired reads",
"r_mm",
"=",
"count_mismatches",
"(",
"read",
")",
"p_mm",
"=",
"count_mismatches",
"(",
"pair",
")",
"# if neither read is mapped, return False",
"if",
"r_mm",
"is",
"False",
"and",
"p_mm",
"is",
"False",
":",
"return",
"False",
"# if no threshold, return True",
"if",
"mismatches",
"is",
"False",
":",
"return",
"True",
"# if req_map is True, both reads have to map",
"if",
"req_map",
"is",
"True",
":",
"if",
"r_mm",
"is",
"False",
"or",
"p_mm",
"is",
"False",
":",
"return",
"False",
"## if option is 'one,' only one read has to pass threshold",
"if",
"mm_option",
"==",
"'one'",
":",
"if",
"(",
"r_mm",
"is",
"not",
"False",
"and",
"r_mm",
"<=",
"mismatches",
")",
"or",
"(",
"p_mm",
"is",
"not",
"False",
"and",
"p_mm",
"<=",
"mismatches",
")",
":",
"return",
"True",
"## if option is 'both,' both reads have to pass threshold",
"if",
"mm_option",
"==",
"'both'",
":",
"## if one read in pair does not map to the scaffold,",
"## make sure the other read passes threshold",
"if",
"r_mm",
"is",
"False",
":",
"if",
"p_mm",
"<=",
"mismatches",
":",
"return",
"True",
"elif",
"p_mm",
"is",
"False",
":",
"if",
"r_mm",
"<=",
"mismatches",
":",
"return",
"True",
"elif",
"(",
"r_mm",
"is",
"not",
"False",
"and",
"r_mm",
"<=",
"mismatches",
")",
"and",
"(",
"p_mm",
"is",
"not",
"False",
"and",
"p_mm",
"<=",
"mismatches",
")",
":",
"return",
"True",
"return",
"False"
] | - check to see if the read maps with <= threshold number of mismatches
- mm_option = 'one' or 'both' depending on whether or not one or both reads
in a pair need to pass the mismatch threshold
- pair can be False if read does not have a pair
- make sure alignment score is not 0, which would indicate that the read was not aligned to the reference | [
"-",
"check",
"to",
"see",
"if",
"the",
"read",
"maps",
"with",
"<",
"=",
"threshold",
"number",
"of",
"mismatches",
"-",
"mm_option",
"=",
"one",
"or",
"both",
"depending",
"on",
"whether",
"or",
"not",
"one",
"or",
"both",
"reads",
"in",
"a",
"pair",
"need",
"to",
"pass",
"the",
"mismatch",
"threshold",
"-",
"pair",
"can",
"be",
"False",
"if",
"read",
"does",
"not",
"have",
"a",
"pair",
"-",
"make",
"sure",
"alignment",
"score",
"is",
"not",
"0",
"which",
"would",
"indicate",
"that",
"the",
"read",
"was",
"not",
"aligned",
"to",
"the",
"reference"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/mapped.py#L36-L84 | train |
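
A quick usage sketch for check_mismatches, with hypothetical inputs; count_mismatches is defined elsewhere in mapped.py, so a stub stands in for it here.

def count_mismatches(read):
    # stub: treat each "read" as already being its mismatch count,
    # or False for an unmapped read (the real function parses the alignment)
    return read

print(check_mismatches(3, 1, 2, 'one', False))     # True: one read passes the threshold
print(check_mismatches(3, 1, 2, 'both', False))    # False: one read has 3 > 2 mismatches
print(check_mismatches(False, 1, 2, 'one', True))  # False: req_map=True rejects unmapped reads
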
christophertbrown/bioscripts | ctbBio/mapped.py | check_region | def check_region(read, pair, region):
"""
determine whether or not reads map to specific region of scaffold
"""
if region is False:
return True
for mapping in read, pair:
if mapping is False:
continue
start, length = int(mapping[3]), len(mapping[9])
r = [start, start + length - 1]
if get_overlap(r, region) > 0:
return True
return False | python | def check_region(read, pair, region):
"""
determine whether or not reads map to specific region of scaffold
"""
if region is False:
return True
for mapping in read, pair:
if mapping is False:
continue
start, length = int(mapping[3]), len(mapping[9])
r = [start, start + length - 1]
if get_overlap(r, region) > 0:
return True
return False | [
"def",
"check_region",
"(",
"read",
",",
"pair",
",",
"region",
")",
":",
"if",
"region",
"is",
"False",
":",
"return",
"True",
"for",
"mapping",
"in",
"read",
",",
"pair",
":",
"if",
"mapping",
"is",
"False",
":",
"continue",
"start",
",",
"length",
"=",
"int",
"(",
"mapping",
"[",
"3",
"]",
")",
",",
"len",
"(",
"mapping",
"[",
"9",
"]",
")",
"r",
"=",
"[",
"start",
",",
"start",
"+",
"length",
"-",
"1",
"]",
"if",
"get_overlap",
"(",
"r",
",",
"region",
")",
">",
"0",
":",
"return",
"True",
"return",
"False"
] | determine whether or not reads map to specific region of scaffold | [
"determine",
"whether",
"or",
"not",
"reads",
"map",
"to",
"specific",
"region",
"of",
"scaffold"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/mapped.py#L92-L105 | train |
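
A minimal sketch of check_region with mocked SAM fields; get_overlap lives elsewhere in mapped.py, so a plausible interval-overlap stand-in is defined here.

def get_overlap(a, b):
    # assumed behavior: length of the intersection of two [start, end] ranges
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

# only SAM fields 3 (1-based leftmost position) and 9 (sequence) are consulted
read = ['r1', '0', 'contig', '100', '42', '50M', '*', '0', '0', 'A' * 50]
print(check_region(read, False, [120, 200]))  # True: the read covers 100-149
print(check_region(read, False, [300, 400]))  # False: no overlap with the region
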
scottrice/pysteam | pysteam/steam.py | get_steam | def get_steam():
"""
Returns a Steam object representing the current Steam installation on the
users computer. If the user doesn't have Steam installed, returns None.
"""
# Helper function which checks if the potential userdata directory exists
# and returns a new Steam instance with that userdata directory if it does.
# If the directory doesn't exist it returns None instead
helper = lambda udd: Steam(udd) if os.path.exists(udd) else None
# For both OS X and Linux, Steam stores its userdata in a consistent
# location.
plat = platform.system()
if plat == 'Darwin':
return helper(paths.default_osx_userdata_path())
if plat == 'Linux':
return helper(paths.default_linux_userdata_path())
# Windows is a bit trickier. The userdata directory is stored in the Steam
# installation directory, meaning that theoretically it could be anywhere.
# Luckily, Valve stores the installation directory in the registry, so it's
# still possible for us to figure out automatically
if plat == 'Windows':
possible_dir = winutils.find_userdata_directory()
# Unlike the others, `possible_dir` might be None (if something odd
# happened with the registry)
return helper(possible_dir) if possible_dir is not None else None
# This should never be hit. Windows, OS X, and Linux should be the only
# supported platforms.
# TODO: Add logging here so that the user (developer) knows that something
# odd happened.
return None | python | def get_steam():
"""
Returns a Steam object representing the current Steam installation on the
users computer. If the user doesn't have Steam installed, returns None.
"""
# Helper function which checks if the potential userdata directory exists
# and returns a new Steam instance with that userdata directory if it does.
# If the directory doesn't exist it returns None instead
helper = lambda udd: Steam(udd) if os.path.exists(udd) else None
# For both OS X and Linux, Steam stores its userdata in a consistent
# location.
plat = platform.system()
if plat == 'Darwin':
return helper(paths.default_osx_userdata_path())
if plat == 'Linux':
return helper(paths.default_linux_userdata_path())
# Windows is a bit trickier. The userdata directory is stored in the Steam
# installation directory, meaning that theoretically it could be anywhere.
# Luckily, Valve stores the installation directory in the registry, so it's
# still possible for us to figure out automatically
if plat == 'Windows':
possible_dir = winutils.find_userdata_directory()
# Unlike the others, `possible_dir` might be None (if something odd
# happened with the registry)
return helper(possible_dir) if possible_dir is not None else None
# This should never be hit. Windows, OS X, and Linux should be the only
# supported platforms.
# TODO: Add logging here so that the user (developer) knows that something
# odd happened.
return None | [
"def",
"get_steam",
"(",
")",
":",
"# Helper function which checks if the potential userdata directory exists",
"# and returns a new Steam instance with that userdata directory if it does.",
"# If the directory doesnt exist it returns None instead",
"helper",
"=",
"lambda",
"udd",
":",
"Steam",
"(",
"udd",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"udd",
")",
"else",
"None",
"# For both OS X and Linux, Steam stores it's userdata in a consistent",
"# location.",
"plat",
"=",
"platform",
".",
"system",
"(",
")",
"if",
"plat",
"==",
"'Darwin'",
":",
"return",
"helper",
"(",
"paths",
".",
"default_osx_userdata_path",
"(",
")",
")",
"if",
"plat",
"==",
"'Linux'",
":",
"return",
"helper",
"(",
"paths",
".",
"default_linux_userdata_path",
"(",
")",
")",
"# Windows is a bit trickier. The userdata directory is stored in the Steam",
"# installation directory, meaning that theoretically it could be anywhere.",
"# Luckily, Valve stores the installation directory in the registry, so its",
"# still possible for us to figure out automatically",
"if",
"plat",
"==",
"'Windows'",
":",
"possible_dir",
"=",
"winutils",
".",
"find_userdata_directory",
"(",
")",
"# Unlike the others, `possible_dir` might be None (if something odd",
"# happened with the registry)",
"return",
"helper",
"(",
"possible_dir",
")",
"if",
"possible_dir",
"is",
"not",
"None",
"else",
"None",
"# This should never be hit. Windows, OS X, and Linux should be the only",
"# supported platforms.",
"# TODO: Add logging here so that the user (developer) knows that something",
"# odd happened.",
"return",
"None"
] | Returns a Steam object representing the current Steam installation on the
users computer. If the user doesn't have Steam installed, returns None. | [
"Returns",
"a",
"Steam",
"object",
"representing",
"the",
"current",
"Steam",
"installation",
"on",
"the",
"users",
"computer",
".",
"If",
"the",
"user",
"doesn",
"t",
"have",
"Steam",
"installed",
"returns",
"None",
"."
] | 1eb2254b5235a053a953e596fa7602d0b110245d | https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/steam.py#L12-L43 | train |
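
Typical use is a simple None check, since the helper returns None whenever no userdata directory can be located:

steam = get_steam()
if steam is None:
    print('no Steam installation detected')
else:
    print('found a Steam userdata directory')
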
christophertbrown/bioscripts | ctbBio/transform.py | zero_to_one | def zero_to_one(table, option):
"""
normalize from zero to one for row or table
"""
if option == 'table':
m = min(min(table))
ma = max(max(table))
t = []
for row in table:
t_row = []
if option != 'table':
m, ma = min(row), max(row)
for i in row:
if ma == m:
t_row.append(0)
else:
t_row.append((i - m)/(ma - m))
t.append(t_row)
return t | python | def zero_to_one(table, option):
"""
normalize from zero to one for row or table
"""
if option == 'table':
m = min(min(table))
ma = max(max(table))
t = []
for row in table:
t_row = []
if option != 'table':
m, ma = min(row), max(row)
for i in row:
if ma == m:
t_row.append(0)
else:
t_row.append((i - m)/(ma - m))
t.append(t_row)
return t | [
"def",
"zero_to_one",
"(",
"table",
",",
"option",
")",
":",
"if",
"option",
"==",
"'table'",
":",
"m",
"=",
"min",
"(",
"min",
"(",
"table",
")",
")",
"ma",
"=",
"max",
"(",
"max",
"(",
"table",
")",
")",
"t",
"=",
"[",
"]",
"for",
"row",
"in",
"table",
":",
"t_row",
"=",
"[",
"]",
"if",
"option",
"!=",
"'table'",
":",
"m",
",",
"ma",
"=",
"min",
"(",
"row",
")",
",",
"max",
"(",
"row",
")",
"for",
"i",
"in",
"row",
":",
"if",
"ma",
"==",
"m",
":",
"t_row",
".",
"append",
"(",
"0",
")",
"else",
":",
"t_row",
".",
"append",
"(",
"(",
"i",
"-",
"m",
")",
"/",
"(",
"ma",
"-",
"m",
")",
")",
"t",
".",
"append",
"(",
"t_row",
")",
"return",
"t"
] | normalize from zero to one for row or table | [
"normalize",
"from",
"zero",
"to",
"one",
"for",
"row",
"or",
"table"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L18-L36 | train |
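
Example of the per-row versus whole-table behavior (any option other than 'table' rescales each row independently). One caveat: min(min(table)) is the minimum of the lexicographically smallest row, which equals the global minimum here but is not guaranteed to in general.

table = [[0, 5, 10], [10, 20, 30]]
print(zero_to_one(table, 'row'))    # [[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]]
print(zero_to_one(table, 'table'))  # every value rescaled against the global 0 and 30
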
christophertbrown/bioscripts | ctbBio/transform.py | pertotal | def pertotal(table, option):
"""
calculate percent of total
"""
if option == 'table':
total = sum([i for line in table for i in line])
t = []
for row in table:
t_row = []
if option != 'table':
total = sum(row)
for i in row:
if total == 0:
t_row.append(0)
else:
t_row.append(i/total*100)
t.append(t_row)
return t | python | def pertotal(table, option):
"""
calculate percent of total
"""
if option == 'table':
total = sum([i for line in table for i in line])
t = []
for row in table:
t_row = []
if option != 'table':
total = sum(row)
for i in row:
if total == 0:
t_row.append(0)
else:
t_row.append(i/total*100)
t.append(t_row)
return t | [
"def",
"pertotal",
"(",
"table",
",",
"option",
")",
":",
"if",
"option",
"==",
"'table'",
":",
"total",
"=",
"sum",
"(",
"[",
"i",
"for",
"line",
"in",
"table",
"for",
"i",
"in",
"line",
"]",
")",
"t",
"=",
"[",
"]",
"for",
"row",
"in",
"table",
":",
"t_row",
"=",
"[",
"]",
"if",
"option",
"!=",
"'table'",
":",
"total",
"=",
"sum",
"(",
"row",
")",
"for",
"i",
"in",
"row",
":",
"if",
"total",
"==",
"0",
":",
"t_row",
".",
"append",
"(",
"0",
")",
"else",
":",
"t_row",
".",
"append",
"(",
"i",
"/",
"total",
"*",
"100",
")",
"t",
".",
"append",
"(",
"t_row",
")",
"return",
"t"
] | calculate percent of total | [
"calculate",
"percent",
"of",
"total"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L38-L55 | train |
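
Row-wise versus table-wide percentages, with a toy table:

table = [[1, 3], [2, 2]]
print(pertotal(table, 'row'))    # [[25.0, 75.0], [50.0, 50.0]]
print(pertotal(table, 'table'))  # percent of the grand total 8: [[12.5, 37.5], [25.0, 25.0]]
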
christophertbrown/bioscripts | ctbBio/transform.py | scale | def scale(table):
"""
scale table based on the column with the largest sum
"""
t = []
columns = [[] for i in table[0]]
for row in table:
for i, v in enumerate(row):
columns[i].append(v)
sums = [float(sum(i)) for i in columns]
scale_to = float(max(sums))
scale_factor = [scale_to/i for i in sums if i != 0]
for row in table:
t.append([a * b for a,b in zip(row, scale_factor)])
return t | python | def scale(table):
"""
scale table based on the column with the largest sum
"""
t = []
columns = [[] for i in table[0]]
for row in table:
for i, v in enumerate(row):
columns[i].append(v)
sums = [float(sum(i)) for i in columns]
scale_to = float(max(sums))
scale_factor = [scale_to/i for i in sums if i != 0]
for row in table:
t.append([a * b for a,b in zip(row, scale_factor)])
return t | [
"def",
"scale",
"(",
"table",
")",
":",
"t",
"=",
"[",
"]",
"columns",
"=",
"[",
"[",
"]",
"for",
"i",
"in",
"table",
"[",
"0",
"]",
"]",
"for",
"row",
"in",
"table",
":",
"for",
"i",
",",
"v",
"in",
"enumerate",
"(",
"row",
")",
":",
"columns",
"[",
"i",
"]",
".",
"append",
"(",
"v",
")",
"sums",
"=",
"[",
"float",
"(",
"sum",
"(",
"i",
")",
")",
"for",
"i",
"in",
"columns",
"]",
"scale_to",
"=",
"float",
"(",
"max",
"(",
"sums",
")",
")",
"scale_factor",
"=",
"[",
"scale_to",
"/",
"i",
"for",
"i",
"in",
"sums",
"if",
"i",
"!=",
"0",
"]",
"for",
"row",
"in",
"table",
":",
"t",
".",
"append",
"(",
"[",
"a",
"*",
"b",
"for",
"a",
",",
"b",
"in",
"zip",
"(",
"row",
",",
"scale_factor",
")",
"]",
")",
"return",
"t"
] | scale table based on the column with the largest sum | [
"scale",
"table",
"based",
"on",
"the",
"column",
"with",
"the",
"largest",
"sum"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L79-L93 | train |
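
Each column is rescaled so that its sum matches the largest column sum. Note that the `if i != 0` filter silently drops zero-sum columns from scale_factor, which would misalign the zip; callers presumably guarantee no all-zero columns.

table = [[1, 2], [1, 6]]  # column sums are 2 and 8
print(scale(table))       # [[4.0, 2.0], [4.0, 6.0]]: both columns now sum to 8
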
christophertbrown/bioscripts | ctbBio/transform.py | norm | def norm(table):
"""
fit to normal distribution
"""
print('# norm dist is broken', file=sys.stderr)
exit()
from matplotlib.pyplot import hist as hist
t = []
for i in table:
t.append(np.ndarray.tolist(hist(i, bins = len(i), normed = True)[0]))
return t | python | def norm(table):
"""
fit to normal distribution
"""
print('# norm dist is broken', file=sys.stderr)
exit()
from matplotlib.pyplot import hist as hist
t = []
for i in table:
t.append(np.ndarray.tolist(hist(i, bins = len(i), normed = True)[0]))
return t | [
"def",
"norm",
"(",
"table",
")",
":",
"print",
"(",
"'# norm dist is broken'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
")",
"from",
"matplotlib",
".",
"pyplot",
"import",
"hist",
"as",
"hist",
"t",
"=",
"[",
"]",
"for",
"i",
"in",
"table",
":",
"t",
".",
"append",
"(",
"np",
".",
"ndarray",
".",
"tolist",
"(",
"hist",
"(",
"i",
",",
"bins",
"=",
"len",
"(",
"i",
")",
",",
"normed",
"=",
"True",
")",
"[",
"0",
"]",
")",
")",
"return",
"t"
] | fit to normal distribution | [
"fit",
"to",
"normal",
"distribution"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L95-L105 | train |
christophertbrown/bioscripts | ctbBio/transform.py | log_trans | def log_trans(table):
"""
log transform each value in table
"""
t = []
all = [item for sublist in table for item in sublist]
if min(all) == 0:
scale = min([i for i in all if i != 0]) * 10e-10
else:
scale = 0
for i in table:
t.append(np.ndarray.tolist(np.log10([j + scale for j in i])))
return t | python | def log_trans(table):
"""
log transform each value in table
"""
t = []
all = [item for sublist in table for item in sublist]
if min(all) == 0:
scale = min([i for i in all if i != 0]) * 10e-10
else:
scale = 0
for i in table:
t.append(np.ndarray.tolist(np.log10([j + scale for j in i])))
return t | [
"def",
"log_trans",
"(",
"table",
")",
":",
"t",
"=",
"[",
"]",
"all",
"=",
"[",
"item",
"for",
"sublist",
"in",
"table",
"for",
"item",
"in",
"sublist",
"]",
"if",
"min",
"(",
"all",
")",
"==",
"0",
":",
"scale",
"=",
"min",
"(",
"[",
"i",
"for",
"i",
"in",
"all",
"if",
"i",
"!=",
"0",
"]",
")",
"*",
"10e-10",
"else",
":",
"scale",
"=",
"0",
"for",
"i",
"in",
"table",
":",
"t",
".",
"append",
"(",
"np",
".",
"ndarray",
".",
"tolist",
"(",
"np",
".",
"log10",
"(",
"[",
"j",
"+",
"scale",
"for",
"j",
"in",
"i",
"]",
")",
")",
")",
"return",
"t"
] | log transform each value in table | [
"log",
"transform",
"each",
"value",
"in",
"table"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L107-L119 | train |
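
log10 is applied elementwise; when the table contains zeros, a tiny pseudocount (smallest nonzero value times 10e-10) is added to every entry first so the log is defined. The sketch assumes transform.py imports numpy as np at module level.

print(log_trans([[1, 10, 100]]))  # [[0.0, 1.0, 2.0]]
print(log_trans([[0, 1, 10]]))    # zeros shifted by 1 * 10e-10 before log10
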
christophertbrown/bioscripts | ctbBio/transform.py | box_cox | def box_cox(table):
"""
box-cox transform table
"""
from scipy.stats import boxcox as bc
t = []
for i in table:
if min(i) == 0:
scale = min([j for j in i if j != 0]) * 10e-10
else:
scale = 0
t.append(np.ndarray.tolist(bc(np.array([j + scale for j in i]))[0]))
return t | python | def box_cox(table):
"""
box-cox transform table
"""
from scipy.stats import boxcox as bc
t = []
for i in table:
if min(i) == 0:
scale = min([j for j in i if j != 0]) * 10e-10
else:
scale = 0
t.append(np.ndarray.tolist(bc(np.array([j + scale for j in i]))[0]))
return t | [
"def",
"box_cox",
"(",
"table",
")",
":",
"from",
"scipy",
".",
"stats",
"import",
"boxcox",
"as",
"bc",
"t",
"=",
"[",
"]",
"for",
"i",
"in",
"table",
":",
"if",
"min",
"(",
"i",
")",
"==",
"0",
":",
"scale",
"=",
"min",
"(",
"[",
"j",
"for",
"j",
"in",
"i",
"if",
"j",
"!=",
"0",
"]",
")",
"*",
"10e-10",
"else",
":",
"scale",
"=",
"0",
"t",
".",
"append",
"(",
"np",
".",
"ndarray",
".",
"tolist",
"(",
"bc",
"(",
"np",
".",
"array",
"(",
"[",
"j",
"+",
"scale",
"for",
"j",
"in",
"i",
"]",
")",
")",
"[",
"0",
"]",
")",
")",
"return",
"t"
] | box-cox transform table | [
"box",
"-",
"cox",
"transform",
"table"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L121-L133 | train |
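
Each row is transformed independently, with scipy.stats.boxcox fitting the lambda per row; zeros get the same pseudocount shift as in log_trans. A quick shape check:

out = box_cox([[1.0, 2.0, 3.0, 4.0]])
print(len(out), len(out[0]))  # 1 row, 4 transformed values
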
christophertbrown/bioscripts | ctbBio/transform.py | inh | def inh(table):
"""
inverse hyperbolic sine transformation
"""
t = []
for i in table:
t.append(np.ndarray.tolist(np.arcsinh(i)))
return t | python | def inh(table):
"""
inverse hyperbolic sine transformation
"""
t = []
for i in table:
t.append(np.ndarray.tolist(np.arcsinh(i)))
return t | [
"def",
"inh",
"(",
"table",
")",
":",
"t",
"=",
"[",
"]",
"for",
"i",
"in",
"table",
":",
"t",
".",
"append",
"(",
"np",
".",
"ndarray",
".",
"tolist",
"(",
"np",
".",
"arcsinh",
"(",
"i",
")",
")",
")",
"return",
"t"
] | inverse hyperbolic sine transformation | [
"inverse",
"hyperbolic",
"sine",
"transformation"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L135-L142 | train |
christophertbrown/bioscripts | ctbBio/transform.py | diri | def diri(table):
"""
from SparCC - "randomly draw from the corresponding posterior
Dirichlet distribution with a uniform prior"
"""
t = []
for i in table:
a = [j + 1 for j in i]
t.append(np.ndarray.tolist(np.random.mtrand.dirichlet(a)))
return t | python | def diri(table):
"""
from SparCC - "randomly draw from the corresponding posterior
Dirichlet distribution with a uniform prior"
"""
t = []
for i in table:
a = [j + 1 for j in i]
t.append(np.ndarray.tolist(np.random.mtrand.dirichlet(a)))
return t | [
"def",
"diri",
"(",
"table",
")",
":",
"t",
"=",
"[",
"]",
"for",
"i",
"in",
"table",
":",
"a",
"=",
"[",
"j",
"+",
"1",
"for",
"j",
"in",
"i",
"]",
"t",
".",
"append",
"(",
"np",
".",
"ndarray",
".",
"tolist",
"(",
"np",
".",
"random",
".",
"mtrand",
".",
"dirichlet",
"(",
"a",
")",
")",
")",
"return",
"t"
] | from SparCC - "randomly draw from the corresponding posterior
Dirichlet distribution with a uniform prior" | [
"from",
"SparCC",
"-",
"randomly",
"draw",
"from",
"the",
"corresponding",
"posterior",
"Dirichlet",
"distribution",
"with",
"a",
"uniform",
"prior"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/transform.py#L144-L153 | train |
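
Adding 1 to every count is the uniform prior; each row then becomes one random draw from the posterior Dirichlet, i.e. a probability vector that sums to 1. The seed is only to make the sketch repeatable.

import numpy as np  # assumed module-level import in transform.py

np.random.seed(0)
draw = diri([[10, 5, 0]])
print(round(sum(draw[0]), 6))  # 1.0
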
smdabdoub/phylotoast | bin/sanger_qiimify.py | generate_barcodes | def generate_barcodes(nIds, codeLen=12):
"""
Given a list of sample IDs generate unique n-base barcodes for each.
Note that only 4^n unique barcodes are possible.
"""
def next_code(b, c, i):
return c[:i] + b + (c[i+1:] if i < -1 else '')
def rand_base():
return random.choice(['A', 'T', 'C', 'G'])
def rand_seq(n):
return ''.join([rand_base() for _ in range(n)])
# homopolymer filter regex: match if 4 identical bases in a row
hpf = re.compile('aaaa|cccc|gggg|tttt', re.IGNORECASE)
while True:
codes = [rand_seq(codeLen)]
if (hpf.search(codes[0]) is None):
break
idx = 0
while len(codes) < nIds:
idx -= 1
if idx < -codeLen:
idx = -1
codes.append(rand_seq(codeLen))
else:
nc = next_code(rand_base(), codes[-1], idx)
if hpf.search(nc) is None:
codes.append(nc)
codes = list(set(codes))
return codes | python | def generate_barcodes(nIds, codeLen=12):
"""
Given a list of sample IDs generate unique n-base barcodes for each.
Note that only 4^n unique barcodes are possible.
"""
def next_code(b, c, i):
return c[:i] + b + (c[i+1:] if i < -1 else '')
def rand_base():
return random.choice(['A', 'T', 'C', 'G'])
def rand_seq(n):
return ''.join([rand_base() for _ in range(n)])
# homopolymer filter regex: match if 4 identical bases in a row
hpf = re.compile('aaaa|cccc|gggg|tttt', re.IGNORECASE)
while True:
codes = [rand_seq(codeLen)]
if (hpf.search(codes[0]) is None):
break
idx = 0
while len(codes) < nIds:
idx -= 1
if idx < -codeLen:
idx = -1
codes.append(rand_seq(codeLen))
else:
nc = next_code(rand_base(), codes[-1], idx)
if hpf.search(nc) is None:
codes.append(nc)
codes = list(set(codes))
return codes | [
"def",
"generate_barcodes",
"(",
"nIds",
",",
"codeLen",
"=",
"12",
")",
":",
"def",
"next_code",
"(",
"b",
",",
"c",
",",
"i",
")",
":",
"return",
"c",
"[",
":",
"i",
"]",
"+",
"b",
"+",
"(",
"c",
"[",
"i",
"+",
"1",
":",
"]",
"if",
"i",
"<",
"-",
"1",
"else",
"''",
")",
"def",
"rand_base",
"(",
")",
":",
"return",
"random",
".",
"choice",
"(",
"[",
"'A'",
",",
"'T'",
",",
"'C'",
",",
"'G'",
"]",
")",
"def",
"rand_seq",
"(",
"n",
")",
":",
"return",
"''",
".",
"join",
"(",
"[",
"rand_base",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"n",
")",
"]",
")",
"# homopolymer filter regex: match if 4 identical bases in a row",
"hpf",
"=",
"re",
".",
"compile",
"(",
"'aaaa|cccc|gggg|tttt'",
",",
"re",
".",
"IGNORECASE",
")",
"while",
"True",
":",
"codes",
"=",
"[",
"rand_seq",
"(",
"codeLen",
")",
"]",
"if",
"(",
"hpf",
".",
"search",
"(",
"codes",
"[",
"0",
"]",
")",
"is",
"None",
")",
":",
"break",
"idx",
"=",
"0",
"while",
"len",
"(",
"codes",
")",
"<",
"nIds",
":",
"idx",
"-=",
"1",
"if",
"idx",
"<",
"-",
"codeLen",
":",
"idx",
"=",
"-",
"1",
"codes",
".",
"append",
"(",
"rand_seq",
"(",
"codeLen",
")",
")",
"else",
":",
"nc",
"=",
"next_code",
"(",
"rand_base",
"(",
")",
",",
"codes",
"[",
"-",
"1",
"]",
",",
"idx",
")",
"if",
"hpf",
".",
"search",
"(",
"nc",
")",
"is",
"None",
":",
"codes",
".",
"append",
"(",
"nc",
")",
"codes",
"=",
"list",
"(",
"set",
"(",
"codes",
")",
")",
"return",
"codes"
] | Given a list of sample IDs generate unique n-base barcodes for each.
Note that only 4^n unique barcodes are possible. | [
"Given",
"a",
"list",
"of",
"sample",
"IDs",
"generate",
"unique",
"n",
"-",
"base",
"barcodes",
"for",
"each",
".",
"Note",
"that",
"only",
"4^n",
"unique",
"barcodes",
"are",
"possible",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/sanger_qiimify.py#L94-L128 | train |
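
A small illustration; the seed is only for repeatability. Two behaviors worth knowing: candidates containing a run of 4 identical bases are rejected by the homopolymer filter, and because duplicates are removed with set() only after the loop, the function can occasionally return slightly fewer than nIds codes.

import random

random.seed(42)
codes = generate_barcodes(5, codeLen=8)
print(len(codes) <= 5, all(len(c) == 8 for c in codes))  # True True
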
smdabdoub/phylotoast | bin/sanger_qiimify.py | scrobble_data_dir | def scrobble_data_dir(dataDir, sampleMap, outF, qualF=None, idopt=None,
utf16=False):
"""
Given a sample ID and a mapping, modify a Sanger FASTA file
to include the barcode and 'primer' in the sequence data
and change the description line as needed.
"""
seqcount = 0
outfiles = [osp.split(outF.name)[1]]
if qualF:
outfiles.append(osp.split(qualF.name)[1])
for item in os.listdir(dataDir):
if item in outfiles or not osp.isfile(os.path.join(dataDir, item)):
continue
# FASTA files
if osp.splitext(item)[1] in file_types['fasta']:
fh = open_enc(os.path.join(dataDir, item), utf16)
records = SeqIO.parse(fh, 'fasta')
for record in records:
if isinstance(idopt, tuple):
sep, field = idopt
sampleID = record.id.split(sep)[field - 1]
else:
sampleID = osp.splitext(item)[0]
record.seq = (sampleMap[sampleID].barcode +
sampleMap[sampleID].primer +
record.seq)
SeqIO.write(record, outF, 'fasta')
seqcount += 1
fh.close()
# QUAL files
elif qualF and osp.splitext(item)[1] in file_types['qual']:
fh = open_enc(os.path.join(dataDir, item), utf16)
records = SeqIO.parse(fh, 'qual')
for record in records:
mi = sampleMap[sampleMap.keys()[0]]
quals = [40 for _ in range(len(mi.barcode) + len(mi.primer))]
record.letter_annotations['phred_quality'][0:0] = quals
SeqIO.write(record, qualF, 'qual')
fh.close()
return seqcount | python | def scrobble_data_dir(dataDir, sampleMap, outF, qualF=None, idopt=None,
utf16=False):
"""
Given a sample ID and a mapping, modify a Sanger FASTA file
to include the barcode and 'primer' in the sequence data
and change the description line as needed.
"""
seqcount = 0
outfiles = [osp.split(outF.name)[1]]
if qualF:
outfiles.append(osp.split(qualF.name)[1])
for item in os.listdir(dataDir):
if item in outfiles or not osp.isfile(os.path.join(dataDir, item)):
continue
# FASTA files
if osp.splitext(item)[1] in file_types['fasta']:
fh = open_enc(os.path.join(dataDir, item), utf16)
records = SeqIO.parse(fh, 'fasta')
for record in records:
if isinstance(idopt, tuple):
sep, field = idopt
sampleID = record.id.split(sep)[field - 1]
else:
sampleID = osp.splitext(item)[0]
record.seq = (sampleMap[sampleID].barcode +
sampleMap[sampleID].primer +
record.seq)
SeqIO.write(record, outF, 'fasta')
seqcount += 1
fh.close()
# QUAL files
elif qualF and osp.splitext(item)[1] in file_types['qual']:
fh = open_enc(os.path.join(dataDir, item), utf16)
records = SeqIO.parse(fh, 'qual')
for record in records:
mi = sampleMap[sampleMap.keys()[0]]
quals = [40 for _ in range(len(mi.barcode) + len(mi.primer))]
record.letter_annotations['phred_quality'][0:0] = quals
SeqIO.write(record, qualF, 'qual')
fh.close()
return seqcount | [
"def",
"scrobble_data_dir",
"(",
"dataDir",
",",
"sampleMap",
",",
"outF",
",",
"qualF",
"=",
"None",
",",
"idopt",
"=",
"None",
",",
"utf16",
"=",
"False",
")",
":",
"seqcount",
"=",
"0",
"outfiles",
"=",
"[",
"osp",
".",
"split",
"(",
"outF",
".",
"name",
")",
"[",
"1",
"]",
"]",
"if",
"qualF",
":",
"outfiles",
".",
"append",
"(",
"osp",
".",
"split",
"(",
"qualF",
".",
"name",
")",
"[",
"1",
"]",
")",
"for",
"item",
"in",
"os",
".",
"listdir",
"(",
"dataDir",
")",
":",
"if",
"item",
"in",
"outfiles",
"or",
"not",
"osp",
".",
"isfile",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataDir",
",",
"item",
")",
")",
":",
"continue",
"# FASTA files",
"if",
"osp",
".",
"splitext",
"(",
"item",
")",
"[",
"1",
"]",
"in",
"file_types",
"[",
"'fasta'",
"]",
":",
"fh",
"=",
"open_enc",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataDir",
",",
"item",
")",
",",
"utf16",
")",
"records",
"=",
"SeqIO",
".",
"parse",
"(",
"fh",
",",
"'fasta'",
")",
"for",
"record",
"in",
"records",
":",
"if",
"isinstance",
"(",
"idopt",
",",
"tuple",
")",
":",
"sep",
",",
"field",
"=",
"idopt",
"sampleID",
"=",
"record",
".",
"id",
".",
"split",
"(",
"sep",
")",
"[",
"field",
"-",
"1",
"]",
"else",
":",
"sampleID",
"=",
"osp",
".",
"splitext",
"(",
"item",
")",
"[",
"0",
"]",
"record",
".",
"seq",
"=",
"(",
"sampleMap",
"[",
"sampleID",
"]",
".",
"barcode",
"+",
"sampleMap",
"[",
"sampleID",
"]",
".",
"primer",
"+",
"record",
".",
"seq",
")",
"SeqIO",
".",
"write",
"(",
"record",
",",
"outF",
",",
"'fasta'",
")",
"seqcount",
"+=",
"1",
"fh",
".",
"close",
"(",
")",
"# QUAL files",
"elif",
"qualF",
"and",
"osp",
".",
"splitext",
"(",
"item",
")",
"[",
"1",
"]",
"in",
"file_types",
"[",
"'qual'",
"]",
":",
"fh",
"=",
"open_enc",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dataDir",
",",
"item",
")",
",",
"utf16",
")",
"records",
"=",
"SeqIO",
".",
"parse",
"(",
"fh",
",",
"'qual'",
")",
"for",
"record",
"in",
"records",
":",
"mi",
"=",
"sampleMap",
"[",
"sampleMap",
".",
"keys",
"(",
")",
"[",
"0",
"]",
"]",
"quals",
"=",
"[",
"40",
"for",
"_",
"in",
"range",
"(",
"len",
"(",
"mi",
".",
"barcode",
")",
"+",
"len",
"(",
"mi",
".",
"primer",
")",
")",
"]",
"record",
".",
"letter_annotations",
"[",
"'phred_quality'",
"]",
"[",
"0",
":",
"0",
"]",
"=",
"quals",
"SeqIO",
".",
"write",
"(",
"record",
",",
"qualF",
",",
"'qual'",
")",
"fh",
".",
"close",
"(",
")",
"return",
"seqcount"
] | Given a sample ID and a mapping, modify a Sanger FASTA file
to include the barcode and 'primer' in the sequence data
and change the description line as needed. | [
"Given",
"a",
"sample",
"ID",
"and",
"a",
"mapping",
"modify",
"a",
"Sanger",
"FASTA",
"file",
"to",
"include",
"the",
"barcode",
"and",
"primer",
"in",
"the",
"sequence",
"data",
"and",
"change",
"the",
"description",
"line",
"as",
"needed",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/sanger_qiimify.py#L158-L199 | train |
smdabdoub/phylotoast | bin/sanger_qiimify.py | handle_program_options | def handle_program_options():
"""
Uses the built-in argparse module to handle command-line options for the
program.
:return: The gathered command-line options specified by the user
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description="Convert Sanger-sequencing \
derived data files for use with the \
metagenomics analysis program QIIME, by \
extracting Sample ID information, adding\
barcodes and primers to the sequence \
data, and outputting a mapping file and\
single FASTA-formatted sequence file \
formed by concatenating all input data.")
parser.add_argument('-i', '--input_dir', required=True,
help="The directory containing sequence data files. \
Assumes all data files are placed in this \
directory. For files organized within folders by\
sample, use -s in addition.")
parser.add_argument('-m', '--map_file', default='map.txt',
help="QIIME-formatted mapping file linking Sample IDs \
with barcodes and primers.")
parser.add_argument('-o', '--output', default='output.fasta',
metavar='OUTPUT_FILE',
help="Single file containing all sequence data found \
in input_dir, FASTA-formatted with barcode and \
primer prepended to sequence. If the -q option \
is passed, any quality data will also be output \
to a single file of the same name with a .qual \
extension.")
parser.add_argument('-b', '--barcode_length', type=int, default=12,
help="Length of the generated barcode sequences. \
Default is 12 (QIIME default), minimum is 8.")
parser.add_argument('-q', '--qual', action='store_true', default=False,
help="Instruct the program to look for quality \
input files")
parser.add_argument('-u', '--utf16', action='store_true', default=False,
help="UTF-16 encoded input files")
parser.add_argument('-t', '--treatment',
help="Inserts an additional column into the mapping \
file specifying some treatment or other variable\
that separates the current set of sequences \
from any other set of sequences. For example:\
-t DiseaseState=healthy")
# data input options
sidGroup = parser.add_mutually_exclusive_group(required=True)
sidGroup.add_argument('-d', '--identifier_pattern',
action=ValidateIDPattern,
nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'),
help="Indicates how to extract the Sample ID from \
the description line. Specify two things: \
1. Field separator, 2. Field number of Sample \
ID (1 or greater). If the separator is a space \
or tab, use \s or \\t respectively. \
Example: >ka-SampleID-2091, use -i - 2, \
indicating - is the separator and the Sample ID\
is field #2.")
sidGroup.add_argument('-f', '--filename_sample_id', action='store_true',
default=False, help='Specify that the program should\
the name of each fasta file as the Sample ID for use\
in the mapping file. This is meant to be used when \
all sequence data for a sample is stored in a single\
file.')
return parser.parse_args() | python | def handle_program_options():
"""
Uses the built-in argparse module to handle command-line options for the
program.
:return: The gathered command-line options specified by the user
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description="Convert Sanger-sequencing \
derived data files for use with the \
metagenomics analysis program QIIME, by \
extracting Sample ID information, adding\
barcodes and primers to the sequence \
data, and outputting a mapping file and\
single FASTA-formatted sequence file \
formed by concatenating all input data.")
parser.add_argument('-i', '--input_dir', required=True,
help="The directory containing sequence data files. \
Assumes all data files are placed in this \
directory. For files organized within folders by\
sample, use -s in addition.")
parser.add_argument('-m', '--map_file', default='map.txt',
help="QIIME-formatted mapping file linking Sample IDs \
with barcodes and primers.")
parser.add_argument('-o', '--output', default='output.fasta',
metavar='OUTPUT_FILE',
help="Single file containing all sequence data found \
in input_dir, FASTA-formatted with barcode and \
primer prepended to sequence. If the -q option \
is passed, any quality data will also be output \
to a single file of the same name with a .qual \
extension.")
parser.add_argument('-b', '--barcode_length', type=int, default=12,
help="Length of the generated barcode sequences. \
Default is 12 (QIIME default), minimum is 8.")
parser.add_argument('-q', '--qual', action='store_true', default=False,
help="Instruct the program to look for quality \
input files")
parser.add_argument('-u', '--utf16', action='store_true', default=False,
help="UTF-16 encoded input files")
parser.add_argument('-t', '--treatment',
help="Inserts an additional column into the mapping \
file specifying some treatment or other variable\
that separates the current set of sequences \
from any other set of sequences. For example:\
-t DiseaseState=healthy")
# data input options
sidGroup = parser.add_mutually_exclusive_group(required=True)
sidGroup.add_argument('-d', '--identifier_pattern',
action=ValidateIDPattern,
nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'),
help="Indicates how to extract the Sample ID from \
the description line. Specify two things: \
1. Field separator, 2. Field number of Sample \
ID (1 or greater). If the separator is a space \
or tab, use \s or \\t respectively. \
Example: >ka-SampleID-2091, use -i - 2, \
indicating - is the separator and the Sample ID\
is field #2.")
sidGroup.add_argument('-f', '--filename_sample_id', action='store_true',
default=False, help='Specify that the program should use\
the name of each fasta file as the Sample ID for use\
in the mapping file. This is meant to be used when \
all sequence data for a sample is stored in a single\
file.')
return parser.parse_args() | [
"def",
"handle_program_options",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"\"Convert Sanger-sequencing \\\n derived data files for use with the \\\n metagenomics analysis program QIIME, by \\\n extracting Sample ID information, adding\\\n barcodes and primers to the sequence \\\n data, and outputting a mapping file and\\\n single FASTA-formatted sequence file \\\n formed by concatenating all input data.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-i'",
",",
"'--input_dir'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"\"The directory containing sequence data files. \\\n Assumes all data files are placed in this \\\n directory. For files organized within folders by\\\n sample, use -s in addition.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-m'",
",",
"'--map_file'",
",",
"default",
"=",
"'map.txt'",
",",
"help",
"=",
"\"QIIME-formatted mapping file linking Sample IDs \\\n with barcodes and primers.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-o'",
",",
"'--output'",
",",
"default",
"=",
"'output.fasta'",
",",
"metavar",
"=",
"'OUTPUT_FILE'",
",",
"help",
"=",
"\"Single file containing all sequence data found \\\n in input_dir, FASTA-formatted with barcode and \\\n primer preprended to sequence. If the -q option \\\n is passed, any quality data will also be output \\\n to a single file of the same name with a .qual \\\n extension.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"'--barcode_length'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"12",
",",
"help",
"=",
"\"Length of the generated barcode sequences. \\\n Default is 12 (QIIME default), minimum is 8.\"",
")",
"parser",
".",
"add_argument",
"(",
"'-q'",
",",
"'--qual'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"Instruct the program to look for quality \\\n input files\"",
")",
"parser",
".",
"add_argument",
"(",
"'-u'",
",",
"'--utf16'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"\"UTF-16 encoded input files\"",
")",
"parser",
".",
"add_argument",
"(",
"'-t'",
",",
"'--treatment'",
",",
"help",
"=",
"\"Inserts an additional column into the mapping \\\n file specifying some treatment or other variable\\\n that separates the current set of sequences \\\n from any other set of seqeunces. For example:\\\n -t DiseaseState=healthy\"",
")",
"# data input options",
"sidGroup",
"=",
"parser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"sidGroup",
".",
"add_argument",
"(",
"'-d'",
",",
"'--identifier_pattern'",
",",
"action",
"=",
"ValidateIDPattern",
",",
"nargs",
"=",
"2",
",",
"metavar",
"=",
"(",
"'SEPARATOR'",
",",
"'FIELD_NUMBER'",
")",
",",
"help",
"=",
"\"Indicates how to extract the Sample ID from \\\n the description line. Specify two things: \\\n 1. Field separator, 2. Field number of Sample \\\n ID (1 or greater). If the separator is a space \\\n or tab, use \\s or \\\\t respectively. \\\n Example: >ka-SampleID-2091, use -i - 2, \\\n indicating - is the separator and the Sample ID\\\n is field #2.\"",
")",
"sidGroup",
".",
"add_argument",
"(",
"'-f'",
",",
"'--filename_sample_id'",
",",
"action",
"=",
"'store_true'",
",",
"default",
"=",
"False",
",",
"help",
"=",
"'Specify that the program should\\\n the name of each fasta file as the Sample ID for use\\\n in the mapping file. This is meant to be used when \\\n all sequence data for a sample is stored in a single\\\n file.'",
")",
"return",
"parser",
".",
"parse_args",
"(",
")"
] | Uses the built-in argparse module to handle command-line options for the
program.
:return: The gathered command-line options specified by the user
:rtype: argparse.ArgumentParser | [
"Uses",
"the",
"built",
"-",
"in",
"argparse",
"module",
"to",
"handle",
"command",
"-",
"line",
"options",
"for",
"the",
"program",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/sanger_qiimify.py#L202-L271 | train |
smdabdoub/phylotoast | bin/transform_biom.py | arcsin_sqrt | def arcsin_sqrt(biom_tbl):
"""
Applies the arcsine square root transform to the
given BIOM-format table
"""
arcsint = lambda data, id_, md: np.arcsin(np.sqrt(data))
tbl_relabd = relative_abd(biom_tbl)
tbl_asin = tbl_relabd.transform(arcsint, inplace=False)
return tbl_asin | python | def arcsin_sqrt(biom_tbl):
"""
Applies the arcsine square root transform to the
given BIOM-format table
"""
arcsint = lambda data, id_, md: np.arcsin(np.sqrt(data))
tbl_relabd = relative_abd(biom_tbl)
tbl_asin = tbl_relabd.transform(arcsint, inplace=False)
return tbl_asin | [
"def",
"arcsin_sqrt",
"(",
"biom_tbl",
")",
":",
"arcsint",
"=",
"lambda",
"data",
",",
"id_",
",",
"md",
":",
"np",
".",
"arcsin",
"(",
"np",
".",
"sqrt",
"(",
"data",
")",
")",
"tbl_relabd",
"=",
"relative_abd",
"(",
"biom_tbl",
")",
"tbl_asin",
"=",
"tbl_relabd",
".",
"transform",
"(",
"arcsint",
",",
"inplace",
"=",
"False",
")",
"return",
"tbl_asin"
] | Applies the arcsine square root transform to the
given BIOM-format table | [
"Applies",
"the",
"arcsine",
"square",
"root",
"transform",
"to",
"the",
"given",
"BIOM",
"-",
"format",
"table"
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/transform_biom.py#L78-L88 | train |
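
Setting the BIOM machinery aside, the per-value math maps a relative abundance p in [0, 1] to arcsin(sqrt(p)), a standard variance-stabilizing transform for proportions:

import numpy as np

p = np.array([0.0, 0.25, 1.0])
print(np.arcsin(np.sqrt(p)))  # approx [0, 0.5236, 1.5708], i.e. 0, pi/6, pi/2
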
christophertbrown/bioscripts | ctbBio/genome_variation.py | parse_sam | def parse_sam(sam, qual):
"""
parse sam file and check mapping quality
"""
for line in sam:
if line.startswith('@'):
continue
line = line.strip().split()
if int(line[4]) == 0 or int(line[4]) < qual:
continue
yield line | python | def parse_sam(sam, qual):
"""
parse sam file and check mapping quality
"""
for line in sam:
if line.startswith('@'):
continue
line = line.strip().split()
if int(line[4]) == 0 or int(line[4]) < qual:
continue
yield line | [
"def",
"parse_sam",
"(",
"sam",
",",
"qual",
")",
":",
"for",
"line",
"in",
"sam",
":",
"if",
"line",
".",
"startswith",
"(",
"'@'",
")",
":",
"continue",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"if",
"int",
"(",
"line",
"[",
"4",
"]",
")",
"==",
"0",
"or",
"int",
"(",
"line",
"[",
"4",
"]",
")",
"<",
"qual",
":",
"continue",
"yield",
"line"
] | parse sam file and check mapping quality | [
"parse",
"sam",
"file",
"and",
"check",
"mapping",
"quality"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L23-L33 | train |
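
parse_sam accepts any iterable of SAM lines; headers and alignments with MAPQ of 0 or below the threshold are skipped. A mocked two-read example (field values hypothetical):

sam = [
    '@SQ\tSN:contig_1\tLN:1000',
    'r1\t0\tcontig_1\t10\t42\t4M\t*\t0\t0\tACGT\tIIII',
    'r2\t0\tcontig_1\t99\t0\t4M\t*\t0\t0\tACGT\tIIII',  # MAPQ 0 is dropped
]
for fields in parse_sam(sam, 30):
    print(fields[0], fields[4])  # r1 42
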
christophertbrown/bioscripts | ctbBio/genome_variation.py | rc_stats | def rc_stats(stats):
"""
reverse complement stats
"""
rc_nucs = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'}
rcs = []
for pos in reversed(stats):
rc = {}
rc['reference frequency'] = pos['reference frequency']
rc['consensus frequency'] = pos['consensus frequency']
rc['In'] = pos['In']
rc['Del'] = pos['Del']
rc['ref'] = rc_nucs[pos['ref']]
rc['consensus'] = (rc_nucs[pos['consensus'][0]], pos['consensus'][1])
for base, stat in list(pos.items()):
if base in rc_nucs:
rc[rc_nucs[base]] = stat
rcs.append(rc)
return rcs | python | def rc_stats(stats):
"""
reverse complement stats
"""
rc_nucs = {'A':'T', 'T':'A', 'G':'C', 'C':'G', 'N':'N'}
rcs = []
for pos in reversed(stats):
rc = {}
rc['reference frequency'] = pos['reference frequency']
rc['consensus frequency'] = pos['consensus frequency']
rc['In'] = pos['In']
rc['Del'] = pos['Del']
rc['ref'] = rc_nucs[pos['ref']]
rc['consensus'] = (rc_nucs[pos['consensus'][0]], pos['consensus'][1])
for base, stat in list(pos.items()):
if base in rc_nucs:
rc[rc_nucs[base]] = stat
rcs.append(rc)
return rcs | [
"def",
"rc_stats",
"(",
"stats",
")",
":",
"rc_nucs",
"=",
"{",
"'A'",
":",
"'T'",
",",
"'T'",
":",
"'A'",
",",
"'G'",
":",
"'C'",
",",
"'C'",
":",
"'G'",
",",
"'N'",
":",
"'N'",
"}",
"rcs",
"=",
"[",
"]",
"for",
"pos",
"in",
"reversed",
"(",
"stats",
")",
":",
"rc",
"=",
"{",
"}",
"rc",
"[",
"'reference frequencey'",
"]",
"=",
"pos",
"[",
"'reference frequency'",
"]",
"rc",
"[",
"'consensus frequencey'",
"]",
"=",
"pos",
"[",
"'consensus frequency'",
"]",
"rc",
"[",
"'In'",
"]",
"=",
"pos",
"[",
"'In'",
"]",
"rc",
"[",
"'Del'",
"]",
"=",
"pos",
"[",
"'Del'",
"]",
"rc",
"[",
"'ref'",
"]",
"=",
"rc_nucs",
"[",
"pos",
"[",
"'ref'",
"]",
"]",
"rc",
"[",
"'consensus'",
"]",
"=",
"(",
"rc_nucs",
"[",
"pos",
"[",
"'consensus'",
"]",
"[",
"0",
"]",
"]",
",",
"pos",
"[",
"'consensus'",
"]",
"[",
"1",
"]",
")",
"for",
"base",
",",
"stat",
"in",
"list",
"(",
"pos",
".",
"items",
"(",
")",
")",
":",
"if",
"base",
"in",
"rc_nucs",
":",
"rc",
"[",
"rc_nucs",
"[",
"base",
"]",
"]",
"=",
"stat",
"rcs",
".",
"append",
"(",
"rc",
")",
"return",
"rcs"
] | reverse complement stats | [
"reverse",
"completement",
"stats"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L138-L156 | train |
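
One mocked per-position dict shows the swap: A/T and G/C counts trade places, the ref and consensus bases are complemented, and the order of the whole list is reversed.

pos = {'A': 9, 'T': 1, 'G': 0, 'C': 0, 'N': 0, 'In': [], 'Del': [],
       'ref': 'A', 'consensus': ('A', 9),
       'reference frequency': 0.9, 'consensus frequency': 0.9}
rc = rc_stats([pos])[0]
print(rc['ref'], rc['consensus'], rc['T'])  # T ('T', 9) 9
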
christophertbrown/bioscripts | ctbBio/genome_variation.py | parse_codons | def parse_codons(ref, start, end, strand):
"""
parse codon nucleotide positions in range start -> end, wrt strand
"""
codon = []
c = cycle([1, 2, 3])
ref = ref[start - 1:end]
if strand == -1:
ref = rc_stats(ref)
for pos in ref:
n = next(c)
codon.append(pos)
if n == 3:
yield codon
codon = [] | python | def parse_codons(ref, start, end, strand):
"""
parse codon nucleotide positions in range start -> end, wrt strand
"""
codon = []
c = cycle([1, 2, 3])
ref = ref[start - 1:end]
if strand == -1:
ref = rc_stats(ref)
for pos in ref:
n = next(c)
codon.append(pos)
if n == 3:
yield codon
codon = [] | [
"def",
"parse_codons",
"(",
"ref",
",",
"start",
",",
"end",
",",
"strand",
")",
":",
"codon",
"=",
"[",
"]",
"c",
"=",
"cycle",
"(",
"[",
"1",
",",
"2",
",",
"3",
"]",
")",
"ref",
"=",
"ref",
"[",
"start",
"-",
"1",
":",
"end",
"]",
"if",
"strand",
"==",
"-",
"1",
":",
"ref",
"=",
"rc_stats",
"(",
"ref",
")",
"for",
"pos",
"in",
"ref",
":",
"n",
"=",
"next",
"(",
"c",
")",
"codon",
".",
"append",
"(",
"pos",
")",
"if",
"n",
"==",
"3",
":",
"yield",
"codon",
"codon",
"=",
"[",
"]"
] | parse codon nucleotide positions in range start -> end, wrt strand | [
"parse",
"codon",
"nucleotide",
"positions",
"in",
"range",
"start",
"-",
">",
"end",
"wrt",
"strand"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L158-L172 | train |
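
On the forward strand the generator just yields the per-position stats in groups of three (start and end are 1-based and inclusive); mocked single-key dicts keep the sketch short.

from itertools import cycle  # assumed module-level import in genome_variation.py

ref = [{'ref': b} for b in 'ATGAAA']
for codon in parse_codons(ref, 1, 6, 1):
    print(''.join(p['ref'] for p in codon))  # ATG, then AAA
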
christophertbrown/bioscripts | ctbBio/genome_variation.py | calc_coverage | def calc_coverage(ref, start, end, length, nucs):
"""
calculate coverage for positions in range start -> end
"""
ref = ref[start - 1:end]
bases = 0
for pos in ref:
for base, count in list(pos.items()):
if base in nucs:
bases += count
return float(bases)/float(length) | python | def calc_coverage(ref, start, end, length, nucs):
"""
calculate coverage for positions in range start -> end
"""
ref = ref[start - 1:end]
bases = 0
for pos in ref:
for base, count in list(pos.items()):
if base in nucs:
bases += count
return float(bases)/float(length) | [
"def",
"calc_coverage",
"(",
"ref",
",",
"start",
",",
"end",
",",
"length",
",",
"nucs",
")",
":",
"ref",
"=",
"ref",
"[",
"start",
"-",
"1",
":",
"end",
"]",
"bases",
"=",
"0",
"for",
"pos",
"in",
"ref",
":",
"for",
"base",
",",
"count",
"in",
"list",
"(",
"pos",
".",
"items",
"(",
")",
")",
":",
"if",
"base",
"in",
"nucs",
":",
"bases",
"+=",
"count",
"return",
"float",
"(",
"bases",
")",
"/",
"float",
"(",
"length",
")"
] | calculate coverage for positions in range start -> end | [
"calculate",
"coverage",
"for",
"positions",
"in",
"range",
"start",
"-",
">",
"end"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L174-L184 | train |
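
Coverage is the summed count of the requested nucleotides across the region, divided by the supplied length; with mocked per-position counts:

ref = [{'A': 10}, {'A': 8, 'T': 2}, {'G': 5}]
print(calc_coverage(ref, 1, 3, 3, ['A', 'T', 'G', 'C']))  # (10 + 10 + 5) / 3 = 8.33...
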
christophertbrown/bioscripts | ctbBio/genome_variation.py | parse_gbk | def parse_gbk(gbks):
"""
parse gbk file
"""
for gbk in gbks:
for record in SeqIO.parse(open(gbk), 'genbank'):
for feature in record.features:
if feature.type == 'gene':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
continue
if feature.type == 'CDS':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
pass
start = int(feature.location.start) + int(feature.qualifiers['codon_start'][0])
end, strand = int(feature.location.end), feature.location.strand
if strand is None:
strand = 1
else:
strand = -1
contig = record.id
# contig = record.id.rsplit('.', 1)[0]
yield contig, [locus, \
[start, end, strand], \
feature.qualifiers] | python | def parse_gbk(gbks):
"""
parse gbk file
"""
for gbk in gbks:
for record in SeqIO.parse(open(gbk), 'genbank'):
for feature in record.features:
if feature.type == 'gene':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
continue
if feature.type == 'CDS':
try:
locus = feature.qualifiers['locus_tag'][0]
except:
pass
start = int(feature.location.start) + int(feature.qualifiers['codon_start'][0])
end, strand = int(feature.location.end), feature.location.strand
if strand is None:
strand = 1
else:
strand = -1
contig = record.id
# contig = record.id.rsplit('.', 1)[0]
yield contig, [locus, \
[start, end, strand], \
feature.qualifiers] | [
"def",
"parse_gbk",
"(",
"gbks",
")",
":",
"for",
"gbk",
"in",
"gbks",
":",
"for",
"record",
"in",
"SeqIO",
".",
"parse",
"(",
"open",
"(",
"gbk",
")",
",",
"'genbank'",
")",
":",
"for",
"feature",
"in",
"record",
".",
"features",
":",
"if",
"feature",
".",
"type",
"==",
"'gene'",
":",
"try",
":",
"locus",
"=",
"feature",
".",
"qualifiers",
"[",
"'locus_tag'",
"]",
"[",
"0",
"]",
"except",
":",
"continue",
"if",
"feature",
".",
"type",
"==",
"'CDS'",
":",
"try",
":",
"locus",
"=",
"feature",
".",
"qualifiers",
"[",
"'locus_tag'",
"]",
"[",
"0",
"]",
"except",
":",
"pass",
"start",
"=",
"int",
"(",
"feature",
".",
"location",
".",
"start",
")",
"+",
"int",
"(",
"feature",
".",
"qualifiers",
"[",
"'codon_start'",
"]",
"[",
"0",
"]",
")",
"end",
",",
"strand",
"=",
"int",
"(",
"feature",
".",
"location",
".",
"end",
")",
",",
"feature",
".",
"location",
".",
"strand",
"if",
"strand",
"is",
"None",
":",
"strand",
"=",
"1",
"else",
":",
"strand",
"=",
"-",
"1",
"contig",
"=",
"record",
".",
"id",
"# contig = record.id.rsplit('.', 1)[0]",
"yield",
"contig",
",",
"[",
"locus",
",",
"[",
"start",
",",
"end",
",",
"strand",
"]",
",",
"feature",
".",
"qualifiers",
"]"
] | parse gbk file | [
"parse",
"gbk",
"file"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L186-L213 | train |
christophertbrown/bioscripts | ctbBio/genome_variation.py | parse_fasta_annotations | def parse_fasta_annotations(fastas, annot_tables, trans_table):
"""
parse gene call information from Prodigal fasta output
"""
if annot_tables is not False:
annots = {}
for table in annot_tables:
for cds in open(table):
ID, start, end, strand = cds.strip().split()
annots[ID] = [start, end, int(strand)]
for fasta in fastas:
for seq in parse_fasta(fasta):
if ('# ;gc_cont' not in seq[0] and '# ID=' not in seq[0]) and annot_tables is False:
print('# specify fasta from Prodigal or annotations table (-t)', file=sys.stderr)
exit()
if 'ID=' in seq[0]:
ID = seq[0].rsplit('ID=', 1)[1].split(';', 1)[0]
contig = seq[0].split()[0].split('>')[1].rsplit('_%s' % (ID), 1)[0]
else:
contig = seq[0].split()[0].split('>')[1].rsplit('_', 1)[0]
locus = seq[0].split()[0].split('>')[1]
# annotation info from Prodigal
if ('# ;gc_cont' in seq[0] or '# ID=' in seq[0]):
info = seq[0].split(' # ')
start, end, strand = int(info[1]), int(info[2]), info[3]
if strand == '1':
strand = 1
else:
strand = -1
product = [''.join(info[4].split()[1:])]
# annotation info from table
else:
start, end, strand = annots[locus]
product = seq[0].split(' ', 1)[1]
info = {'transl_table':[trans_table], \
'translation':[seq[1]], \
'product':product}
yield contig, [locus, [start, end, strand], info] | python | def parse_fasta_annotations(fastas, annot_tables, trans_table):
"""
parse gene call information from Prodigal fasta output
"""
if annot_tables is not False:
annots = {}
for table in annot_tables:
for cds in open(table):
ID, start, end, strand = cds.strip().split()
annots[ID] = [start, end, int(strand)]
for fasta in fastas:
for seq in parse_fasta(fasta):
if ('# ;gc_cont' not in seq[0] and '# ID=' not in seq[0]) and annot_tables is False:
print('# specify fasta from Prodigal or annotations table (-t)', file=sys.stderr)
exit()
if 'ID=' in seq[0]:
ID = seq[0].rsplit('ID=', 1)[1].split(';', 1)[0]
contig = seq[0].split()[0].split('>')[1].rsplit('_%s' % (ID), 1)[0]
else:
contig = seq[0].split()[0].split('>')[1].rsplit('_', 1)[0]
locus = seq[0].split()[0].split('>')[1]
# annotation info from Prodigal
if ('# ;gc_cont' in seq[0] or '# ID=' in seq[0]):
info = seq[0].split(' # ')
start, end, strand = int(info[1]), int(info[2]), info[3]
if strand == '1':
strand = 1
else:
strand = -1
product = [''.join(info[4].split()[1:])]
# annotation info from table
else:
start, end, strand = annots[locus]
product = seq[0].split(' ', 1)[1]
info = {'transl_table':[trans_table], \
'translation':[seq[1]], \
'product':product}
yield contig, [locus, [start, end, strand], info] | [
"def",
"parse_fasta_annotations",
"(",
"fastas",
",",
"annot_tables",
",",
"trans_table",
")",
":",
"if",
"annot_tables",
"is",
"not",
"False",
":",
"annots",
"=",
"{",
"}",
"for",
"table",
"in",
"annot_tables",
":",
"for",
"cds",
"in",
"open",
"(",
"table",
")",
":",
"ID",
",",
"start",
",",
"end",
",",
"strand",
"=",
"cds",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"annots",
"[",
"ID",
"]",
"=",
"[",
"start",
",",
"end",
",",
"int",
"(",
"strand",
")",
"]",
"for",
"fasta",
"in",
"fastas",
":",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fasta",
")",
":",
"if",
"(",
"'# ;gc_cont'",
"not",
"in",
"seq",
"[",
"0",
"]",
"and",
"'# ID='",
"not",
"in",
"seq",
"[",
"0",
"]",
")",
"and",
"annot_tables",
"is",
"False",
":",
"print",
"(",
"'# specify fasta from Prodigal or annotations table (-t)'",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
")",
"if",
"'ID='",
"in",
"seq",
"[",
"0",
"]",
":",
"ID",
"=",
"seq",
"[",
"0",
"]",
".",
"rsplit",
"(",
"'ID='",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
"';'",
",",
"1",
")",
"[",
"0",
"]",
"contig",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"rsplit",
"(",
"'_%s'",
"%",
"(",
"ID",
")",
",",
"1",
")",
"[",
"0",
"]",
"else",
":",
"contig",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
".",
"rsplit",
"(",
"'_'",
",",
"1",
")",
"[",
"0",
"]",
"locus",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
")",
"[",
"1",
"]",
"# annotation info from Prodigal",
"if",
"(",
"'# ;gc_cont'",
"in",
"seq",
"[",
"0",
"]",
"or",
"'# ID='",
"in",
"seq",
"[",
"0",
"]",
")",
":",
"info",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"' # '",
")",
"start",
",",
"end",
",",
"strand",
"=",
"int",
"(",
"info",
"[",
"1",
"]",
")",
",",
"int",
"(",
"info",
"[",
"2",
"]",
")",
",",
"info",
"[",
"3",
"]",
"if",
"strand",
"==",
"'1'",
":",
"strand",
"=",
"1",
"else",
":",
"strand",
"=",
"-",
"1",
"product",
"=",
"[",
"''",
".",
"join",
"(",
"info",
"[",
"4",
"]",
".",
"split",
"(",
")",
"[",
"1",
":",
"]",
")",
"]",
"# annotation info from table",
"else",
":",
"start",
",",
"end",
",",
"strand",
"=",
"annots",
"[",
"locus",
"]",
"product",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"1",
"]",
"info",
"=",
"{",
"'transl_table'",
":",
"[",
"trans_table",
"]",
",",
"'translation'",
":",
"[",
"seq",
"[",
"1",
"]",
"]",
",",
"'product'",
":",
"product",
"}",
"yield",
"contig",
",",
"[",
"locus",
",",
"[",
"start",
",",
"end",
",",
"strand",
"]",
",",
"info",
"]"
] | parse gene call information from Prodigal fasta output | [
"parse",
"gene",
"call",
"information",
"from",
"Prodigal",
"fasta",
"output"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L215-L252 | train |
christophertbrown/bioscripts | ctbBio/genome_variation.py | parse_annotations | def parse_annotations(annots, fmt, annot_tables, trans_table):
"""
parse annotations in either gbk or Prodigal fasta format
"""
annotations = {} # annotations[contig] = [features]
# gbk format
if fmt is False:
for contig, feature in parse_gbk(annots):
if contig not in annotations:
annotations[contig] = []
annotations[contig].append(feature)
# fasta format
else:
for contig, feature in parse_fasta_annotations(annots, annot_tables, trans_table):
if contig not in annotations:
annotations[contig] = []
annotations[contig].append(feature)
return annotations | python | def parse_annotations(annots, fmt, annot_tables, trans_table):
"""
parse annotations in either gbk or Prodigal fasta format
"""
annotations = {} # annotations[contig] = [features]
# gbk format
if fmt is False:
for contig, feature in parse_gbk(annots):
if contig not in annotations:
annotations[contig] = []
annotations[contig].append(feature)
# fasta format
else:
for contig, feature in parse_fasta_annotations(annots, annot_tables, trans_table):
if contig not in annotations:
annotations[contig] = []
annotations[contig].append(feature)
return annotations | [
"def",
"parse_annotations",
"(",
"annots",
",",
"fmt",
",",
"annot_tables",
",",
"trans_table",
")",
":",
"annotations",
"=",
"{",
"}",
"# annotations[contig] = [features]",
"# gbk format",
"if",
"fmt",
"is",
"False",
":",
"for",
"contig",
",",
"feature",
"in",
"parse_gbk",
"(",
"annots",
")",
":",
"if",
"contig",
"not",
"in",
"annotations",
":",
"annotations",
"[",
"contig",
"]",
"=",
"[",
"]",
"annotations",
"[",
"contig",
"]",
".",
"append",
"(",
"feature",
")",
"# fasta format",
"else",
":",
"for",
"contig",
",",
"feature",
"in",
"parse_fasta_annotations",
"(",
"annots",
",",
"annot_tables",
",",
"trans_table",
")",
":",
"if",
"contig",
"not",
"in",
"annotations",
":",
"annotations",
"[",
"contig",
"]",
"=",
"[",
"]",
"annotations",
"[",
"contig",
"]",
".",
"append",
"(",
"feature",
")",
"return",
"annotations"
] | parse annotations in either gbk or Prodigal fasta format | [
"parse",
"annotations",
"in",
"either",
"gbk",
"or",
"Prodigal",
"fasta",
"format"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L254-L271 | train |
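A usage sketch; the file name and argument values are illustrative (fmt=True takes the Prodigal-fasta path, fmt=False the GenBank path):

    annots = parse_annotations(open('bin1.genes.fna'), True, False, 11)
    for contig, features in annots.items():
        for locus, (start, end, strand), info in features:
            print(contig, locus, start, end, strand, info['product'])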
christophertbrown/bioscripts | ctbBio/genome_variation.py | codon2aa | def codon2aa(codon, trans_table):
"""
convert codon to amino acid
"""
return Seq(''.join(codon), IUPAC.ambiguous_dna).translate(table = trans_table)[0] | python | def codon2aa(codon, trans_table):
"""
convert codon to amino acid
"""
return Seq(''.join(codon), IUPAC.ambiguous_dna).translate(table = trans_table)[0] | [
"def",
"codon2aa",
"(",
"codon",
",",
"trans_table",
")",
":",
"return",
"Seq",
"(",
"''",
".",
"join",
"(",
"codon",
")",
",",
"IUPAC",
".",
"ambiguous_dna",
")",
".",
"translate",
"(",
"table",
"=",
"trans_table",
")",
"[",
"0",
"]"
] | convert codon to amino acid | [
"convert",
"codon",
"to",
"amino",
"acid"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L311-L315 | train |
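Two quick checks (assuming Biopython < 1.78, since Bio.Alphabet and IUPAC were removed in release 1.78):

    print(codon2aa(['A', 'T', 'G'], 11))  # 'M' (methionine)
    print(codon2aa(['T', 'A', 'A'], 11))  # '*' (stop)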
christophertbrown/bioscripts | ctbBio/genome_variation.py | find_consensus | def find_consensus(bases):
"""
find consensus base based on nucleotide
frequencies
"""
nucs = ['A', 'T', 'G', 'C', 'N']
total = sum([bases[nuc] for nuc in nucs if nuc in bases])
# save most common base as consensus (random nuc if there is a tie)
try:
top = max([bases[nuc] for nuc in nucs if nuc in bases])
except:
bases['consensus'] = ('N', 'n/a')
bases['consensus frequency'] = 'n/a'
bases['reference frequency'] = 'n/a'
return bases
top = [(nuc, bases[nuc]) for nuc in bases if bases[nuc] == top]
if top[0][1] == 0:
bases['consensus'] = ('n/a', 0)
else:
bases['consensus'] = random.choice(top)
if total == 0:
c_freq = 'n/a'
ref_freq = 'n/a'
else:
c_freq = float(bases['consensus'][1]) / float(total)
if bases['ref'] not in bases:
ref_freq = 0
else:
ref_freq = float(bases[bases['ref']]) / float(total)
bases['consensus frequency'] = c_freq
bases['reference frequency'] = ref_freq
return bases | python | def find_consensus(bases):
"""
find consensus base based on nucleotide
frequencies
"""
nucs = ['A', 'T', 'G', 'C', 'N']
total = sum([bases[nuc] for nuc in nucs if nuc in bases])
# save most common base as consensus (random nuc if there is a tie)
try:
top = max([bases[nuc] for nuc in nucs if nuc in bases])
except:
bases['consensus'] = ('N', 'n/a')
bases['consensus frequency'] = 'n/a'
bases['reference frequency'] = 'n/a'
return bases
top = [(nuc, bases[nuc]) for nuc in bases if bases[nuc] == top]
if top[0][1] == 0:
bases['consensus'] = ('n/a', 0)
else:
bases['consensus'] = random.choice(top)
if total == 0:
c_freq = 'n/a'
ref_freq = 'n/a'
else:
c_freq = float(bases['consensus'][1]) / float(total)
if bases['ref'] not in bases:
ref_freq = 0
else:
ref_freq = float(bases[bases['ref']]) / float(total)
bases['consensus frequency'] = c_freq
bases['reference frequency'] = ref_freq
return bases | [
"def",
"find_consensus",
"(",
"bases",
")",
":",
"nucs",
"=",
"[",
"'A'",
",",
"'T'",
",",
"'G'",
",",
"'C'",
",",
"'N'",
"]",
"total",
"=",
"sum",
"(",
"[",
"bases",
"[",
"nuc",
"]",
"for",
"nuc",
"in",
"nucs",
"if",
"nuc",
"in",
"bases",
"]",
")",
"# save most common base as consensus (random nuc if there is a tie)",
"try",
":",
"top",
"=",
"max",
"(",
"[",
"bases",
"[",
"nuc",
"]",
"for",
"nuc",
"in",
"nucs",
"if",
"nuc",
"in",
"bases",
"]",
")",
"except",
":",
"bases",
"[",
"'consensus'",
"]",
"=",
"(",
"'N'",
",",
"'n/a'",
")",
"bases",
"[",
"'consensus frequency'",
"]",
"=",
"'n/a'",
"bases",
"[",
"'reference frequency'",
"]",
"=",
"'n/a'",
"return",
"bases",
"top",
"=",
"[",
"(",
"nuc",
",",
"bases",
"[",
"nuc",
"]",
")",
"for",
"nuc",
"in",
"bases",
"if",
"bases",
"[",
"nuc",
"]",
"==",
"top",
"]",
"if",
"top",
"[",
"0",
"]",
"[",
"1",
"]",
"==",
"0",
":",
"bases",
"[",
"'consensus'",
"]",
"=",
"(",
"'n/a'",
",",
"0",
")",
"else",
":",
"bases",
"[",
"'consensus'",
"]",
"=",
"random",
".",
"choice",
"(",
"top",
")",
"if",
"total",
"==",
"0",
":",
"c_freq",
"=",
"'n/a'",
"ref_freq",
"=",
"'n/a'",
"else",
":",
"c_freq",
"=",
"float",
"(",
"bases",
"[",
"'consensus'",
"]",
"[",
"1",
"]",
")",
"/",
"float",
"(",
"total",
")",
"if",
"bases",
"[",
"'ref'",
"]",
"not",
"in",
"bases",
":",
"ref_freq",
"=",
"0",
"else",
":",
"ref_freq",
"=",
"float",
"(",
"bases",
"[",
"bases",
"[",
"'ref'",
"]",
"]",
")",
"/",
"float",
"(",
"total",
")",
"bases",
"[",
"'consensus frequency'",
"]",
"=",
"c_freq",
"bases",
"[",
"'reference frequency'",
"]",
"=",
"ref_freq",
"return",
"bases"
] | find consensus base based on nucleotide
frequencies | [
"find",
"consensus",
"base",
"based",
"on",
"nucleotide",
"frequencies"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L371-L402 | train |
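A worked example with made-up counts (one position covered by five reads):

    bases = {'A': 1, 'T': 0, 'G': 1, 'C': 3, 'N': 0, 'ref': 'A'}
    bases = find_consensus(bases)
    print(bases['consensus'])            # ('C', 3)
    print(bases['consensus frequency'])  # 0.6  (3 of 5 reads)
    print(bases['reference frequency'])  # 0.2  (1 of 5 reads)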
christophertbrown/bioscripts | ctbBio/genome_variation.py | print_consensus | def print_consensus(genomes):
"""
    print consensus sequences for each genome and sample
"""
# generate consensus sequences
cons = {} # cons[genome][sample][contig] = consensus
for genome, contigs in list(genomes.items()):
cons[genome] = {}
for contig, samples in list(contigs.items()):
for sample, stats in list(samples.items()):
if sample not in cons[genome]:
cons[genome][sample] = {}
seq = cons[genome][sample][contig] = []
for pos, ps in enumerate(stats['bp_stats'], 1):
ref, consensus = ps['ref'], ps['consensus'][0]
if consensus == 'n/a':
consensus = ref.lower()
seq.append(consensus)
# print consensus sequences
for genome, samples in cons.items():
for sample, contigs in samples.items():
fn = '%s.%s.consensus.fa' % (genome, sample)
f = open(fn, 'w')
for contig, seq in contigs.items():
print('>%s' % (contig), file = f)
print(''.join(seq), file = f)
f.close()
return cons | python | def print_consensus(genomes):
"""
    print consensus sequences for each genome and sample
"""
# generate consensus sequences
cons = {} # cons[genome][sample][contig] = consensus
for genome, contigs in list(genomes.items()):
cons[genome] = {}
for contig, samples in list(contigs.items()):
for sample, stats in list(samples.items()):
if sample not in cons[genome]:
cons[genome][sample] = {}
seq = cons[genome][sample][contig] = []
for pos, ps in enumerate(stats['bp_stats'], 1):
ref, consensus = ps['ref'], ps['consensus'][0]
if consensus == 'n/a':
consensus = ref.lower()
seq.append(consensus)
# print consensus sequences
for genome, samples in cons.items():
for sample, contigs in samples.items():
fn = '%s.%s.consensus.fa' % (genome, sample)
f = open(fn, 'w')
for contig, seq in contigs.items():
print('>%s' % (contig), file = f)
print(''.join(seq), file = f)
f.close()
return cons | [
"def",
"print_consensus",
"(",
"genomes",
")",
":",
"# generate consensus sequences",
"cons",
"=",
"{",
"}",
"# cons[genome][sample][contig] = consensus",
"for",
"genome",
",",
"contigs",
"in",
"list",
"(",
"genomes",
".",
"items",
"(",
")",
")",
":",
"cons",
"[",
"genome",
"]",
"=",
"{",
"}",
"for",
"contig",
",",
"samples",
"in",
"list",
"(",
"contigs",
".",
"items",
"(",
")",
")",
":",
"for",
"sample",
",",
"stats",
"in",
"list",
"(",
"samples",
".",
"items",
"(",
")",
")",
":",
"if",
"sample",
"not",
"in",
"cons",
"[",
"genome",
"]",
":",
"cons",
"[",
"genome",
"]",
"[",
"sample",
"]",
"=",
"{",
"}",
"seq",
"=",
"cons",
"[",
"genome",
"]",
"[",
"sample",
"]",
"[",
"contig",
"]",
"=",
"[",
"]",
"for",
"pos",
",",
"ps",
"in",
"enumerate",
"(",
"stats",
"[",
"'bp_stats'",
"]",
",",
"1",
")",
":",
"ref",
",",
"consensus",
"=",
"ps",
"[",
"'ref'",
"]",
",",
"ps",
"[",
"'consensus'",
"]",
"[",
"0",
"]",
"if",
"consensus",
"==",
"'n/a'",
":",
"consensus",
"=",
"ref",
".",
"lower",
"(",
")",
"seq",
".",
"append",
"(",
"consensus",
")",
"# print consensus sequences",
"for",
"genome",
",",
"samples",
"in",
"cons",
".",
"items",
"(",
")",
":",
"for",
"sample",
",",
"contigs",
"in",
"samples",
".",
"items",
"(",
")",
":",
"fn",
"=",
"'%s.%s.consensus.fa'",
"%",
"(",
"genome",
",",
"sample",
")",
"f",
"=",
"open",
"(",
"fn",
",",
"'w'",
")",
"for",
"contig",
",",
"seq",
"in",
"contigs",
".",
"items",
"(",
")",
":",
"print",
"(",
"'>%s'",
"%",
"(",
"contig",
")",
",",
"file",
"=",
"f",
")",
"print",
"(",
"''",
".",
"join",
"(",
"seq",
")",
",",
"file",
"=",
"f",
")",
"f",
".",
"close",
"(",
")",
"return",
"cons"
] | print consensus sequences for each genome and sample | [
"print",
"consensensus",
"sequences",
"for",
"each",
"genome",
"and",
"sample"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_variation.py#L451-L478 | train |
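A tiny, fabricated end-to-end input showing the expected nesting, genomes[genome][contig][sample]['bp_stats'], and the output naming:

    pos1 = find_consensus({'A': 5, 'T': 0, 'G': 0, 'C': 0, 'N': 0, 'ref': 'A'})
    pos2 = find_consensus({'A': 0, 'T': 0, 'G': 0, 'C': 0, 'N': 0, 'ref': 'G'})
    genomes = {'bin1': {'contig_1': {'sampleA': {'bp_stats': [pos1, pos2]}}}}
    cons = print_consensus(genomes)
    # writes bin1.sampleA.consensus.fa: '>contig_1' followed by 'Ag' --
    # the uncovered position falls back to the lower-cased reference base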
christophertbrown/bioscripts | ctbBio/genome_coverage.py | parse_cov | def parse_cov(cov_table, scaffold2genome):
"""
calculate genome coverage from scaffold coverage table
"""
size = {} # size[genome] = genome size
mapped = {} # mapped[genome][sample] = mapped bases
# parse coverage files
for line in open(cov_table):
line = line.strip().split('\t')
if line[0].startswith('#'):
samples = line[1:]
samples = [i.rsplit('/', 1)[-1].split('.', 1)[0] for i in samples]
continue
scaffold, length = line[0].split(': ')
length = float(length)
covs = [float(i) for i in line[1:]]
bases = [c * length for c in covs]
if scaffold not in scaffold2genome:
continue
genome = scaffold2genome[scaffold]
if genome not in size:
size[genome] = 0
mapped[genome] = {sample:0 for sample in samples}
# keep track of genome size
size[genome] += length
# keep track of number of mapped bases
for sample, count in zip(samples, bases):
mapped[genome][sample] += count
# calculate coverage from base counts and genome size
coverage = {'genome':[], 'genome size (bp)':[], 'sample':[], 'coverage':[]}
for genome, length in size.items():
for sample in samples:
cov = mapped[genome][sample] / length
coverage['genome'].append(genome)
coverage['genome size (bp)'].append(length)
coverage['sample'].append(sample)
coverage['coverage'].append(cov)
return pd.DataFrame(coverage) | python | def parse_cov(cov_table, scaffold2genome):
"""
calculate genome coverage from scaffold coverage table
"""
size = {} # size[genome] = genome size
mapped = {} # mapped[genome][sample] = mapped bases
# parse coverage files
for line in open(cov_table):
line = line.strip().split('\t')
if line[0].startswith('#'):
samples = line[1:]
samples = [i.rsplit('/', 1)[-1].split('.', 1)[0] for i in samples]
continue
scaffold, length = line[0].split(': ')
length = float(length)
covs = [float(i) for i in line[1:]]
bases = [c * length for c in covs]
if scaffold not in scaffold2genome:
continue
genome = scaffold2genome[scaffold]
if genome not in size:
size[genome] = 0
mapped[genome] = {sample:0 for sample in samples}
# keep track of genome size
size[genome] += length
# keep track of number of mapped bases
for sample, count in zip(samples, bases):
mapped[genome][sample] += count
# calculate coverage from base counts and genome size
coverage = {'genome':[], 'genome size (bp)':[], 'sample':[], 'coverage':[]}
for genome, length in size.items():
for sample in samples:
cov = mapped[genome][sample] / length
coverage['genome'].append(genome)
coverage['genome size (bp)'].append(length)
coverage['sample'].append(sample)
coverage['coverage'].append(cov)
return pd.DataFrame(coverage) | [
"def",
"parse_cov",
"(",
"cov_table",
",",
"scaffold2genome",
")",
":",
"size",
"=",
"{",
"}",
"# size[genome] = genome size",
"mapped",
"=",
"{",
"}",
"# mapped[genome][sample] = mapped bases",
"# parse coverage files",
"for",
"line",
"in",
"open",
"(",
"cov_table",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"line",
"[",
"0",
"]",
".",
"startswith",
"(",
"'#'",
")",
":",
"samples",
"=",
"line",
"[",
"1",
":",
"]",
"samples",
"=",
"[",
"i",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"for",
"i",
"in",
"samples",
"]",
"continue",
"scaffold",
",",
"length",
"=",
"line",
"[",
"0",
"]",
".",
"split",
"(",
"': '",
")",
"length",
"=",
"float",
"(",
"length",
")",
"covs",
"=",
"[",
"float",
"(",
"i",
")",
"for",
"i",
"in",
"line",
"[",
"1",
":",
"]",
"]",
"bases",
"=",
"[",
"c",
"*",
"length",
"for",
"c",
"in",
"covs",
"]",
"if",
"scaffold",
"not",
"in",
"scaffold2genome",
":",
"continue",
"genome",
"=",
"scaffold2genome",
"[",
"scaffold",
"]",
"if",
"genome",
"not",
"in",
"size",
":",
"size",
"[",
"genome",
"]",
"=",
"0",
"mapped",
"[",
"genome",
"]",
"=",
"{",
"sample",
":",
"0",
"for",
"sample",
"in",
"samples",
"}",
"# keep track of genome size",
"size",
"[",
"genome",
"]",
"+=",
"length",
"# keep track of number of mapped bases",
"for",
"sample",
",",
"count",
"in",
"zip",
"(",
"samples",
",",
"bases",
")",
":",
"mapped",
"[",
"genome",
"]",
"[",
"sample",
"]",
"+=",
"count",
"# calculate coverage from base counts and genome size",
"coverage",
"=",
"{",
"'genome'",
":",
"[",
"]",
",",
"'genome size (bp)'",
":",
"[",
"]",
",",
"'sample'",
":",
"[",
"]",
",",
"'coverage'",
":",
"[",
"]",
"}",
"for",
"genome",
",",
"length",
"in",
"size",
".",
"items",
"(",
")",
":",
"for",
"sample",
"in",
"samples",
":",
"cov",
"=",
"mapped",
"[",
"genome",
"]",
"[",
"sample",
"]",
"/",
"length",
"coverage",
"[",
"'genome'",
"]",
".",
"append",
"(",
"genome",
")",
"coverage",
"[",
"'genome size (bp)'",
"]",
".",
"append",
"(",
"length",
")",
"coverage",
"[",
"'sample'",
"]",
".",
"append",
"(",
"sample",
")",
"coverage",
"[",
"'coverage'",
"]",
".",
"append",
"(",
"cov",
")",
"return",
"pd",
".",
"DataFrame",
"(",
"coverage",
")"
] | calculate genome coverage from scaffold coverage table | [
"calculate",
"genome",
"coverage",
"from",
"scaffold",
"coverage",
"table"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L13-L50 | train |
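The computed value is a length-weighted average of scaffold coverages; a small worked example (numbers invented):

    # a data row looks like 'scaffold_1: 1000<TAB>10.0<TAB>2.5'; the length
    # is folded back into mapped bases, so for one genome in one sample:
    #   scaffold A: 1,000 bp at 10x -> 10,000 mapped bases
    #   scaffold B: 3,000 bp at  2x ->  6,000 mapped bases
    print((10 * 1000 + 2 * 3000) / (1000 + 3000))  # genome coverage = 4.0x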
christophertbrown/bioscripts | ctbBio/genome_coverage.py | genome_coverage | def genome_coverage(covs, s2b):
"""
calculate genome coverage from scaffold coverage
"""
COV = []
for cov in covs:
COV.append(parse_cov(cov, s2b))
return pd.concat(COV) | python | def genome_coverage(covs, s2b):
"""
calculate genome coverage from scaffold coverage
"""
COV = []
for cov in covs:
COV.append(parse_cov(cov, s2b))
return pd.concat(COV) | [
"def",
"genome_coverage",
"(",
"covs",
",",
"s2b",
")",
":",
"COV",
"=",
"[",
"]",
"for",
"cov",
"in",
"covs",
":",
"COV",
".",
"append",
"(",
"parse_cov",
"(",
"cov",
",",
"s2b",
")",
")",
"return",
"pd",
".",
"concat",
"(",
"COV",
")"
] | calculate genome coverage from scaffold coverage | [
"calculate",
"genome",
"coverage",
"from",
"scaffold",
"coverage"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L52-L59 | train |
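Usage sketch (file names hypothetical; s2b comes from parse_s2bs() or fa2s2b() below). Each coverage table may carry several samples, and the result is one long-format DataFrame:

    s2b = fa2s2b(['bins/bin1.fa', 'bins/bin2.fa'])
    df = genome_coverage(['sampleA.cov.tsv', 'sampleB.cov.tsv'], s2b)
    print(df.pivot_table(index='genome', columns='sample', values='coverage'))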
christophertbrown/bioscripts | ctbBio/genome_coverage.py | parse_s2bs | def parse_s2bs(s2bs):
"""
convert s2b files to dictionary
"""
s2b = {}
for s in s2bs:
for line in open(s):
line = line.strip().split('\t')
s, b = line[0], line[1]
s2b[s] = b
return s2b | python | def parse_s2bs(s2bs):
"""
convert s2b files to dictionary
"""
s2b = {}
for s in s2bs:
for line in open(s):
line = line.strip().split('\t')
s, b = line[0], line[1]
s2b[s] = b
return s2b | [
"def",
"parse_s2bs",
"(",
"s2bs",
")",
":",
"s2b",
"=",
"{",
"}",
"for",
"s",
"in",
"s2bs",
":",
"for",
"line",
"in",
"open",
"(",
"s",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"s",
",",
"b",
"=",
"line",
"[",
"0",
"]",
",",
"line",
"[",
"1",
"]",
"s2b",
"[",
"s",
"]",
"=",
"b",
"return",
"s2b"
] | convert s2b files to dictionary | [
"convert",
"s2b",
"files",
"to",
"dictionary"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L61-L71 | train |
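The expected input is a two-column, tab-separated scaffold-to-bin table; a sketch with an invented file:

    # bins.s2b.tsv:
    #   scaffold_1<TAB>bin1
    #   scaffold_2<TAB>bin1
    #   scaffold_9<TAB>bin2
    s2b = parse_s2bs(['bins.s2b.tsv'])
    print(s2b['scaffold_9'])  # 'bin2'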
christophertbrown/bioscripts | ctbBio/genome_coverage.py | fa2s2b | def fa2s2b(fastas):
"""
convert fastas to s2b dictionary
"""
s2b = {}
for fa in fastas:
for seq in parse_fasta(fa):
s = seq[0].split('>', 1)[1].split()[0]
s2b[s] = fa.rsplit('/', 1)[-1].rsplit('.', 1)[0]
return s2b | python | def fa2s2b(fastas):
"""
convert fastas to s2b dictionary
"""
s2b = {}
for fa in fastas:
for seq in parse_fasta(fa):
s = seq[0].split('>', 1)[1].split()[0]
s2b[s] = fa.rsplit('/', 1)[-1].rsplit('.', 1)[0]
return s2b | [
"def",
"fa2s2b",
"(",
"fastas",
")",
":",
"s2b",
"=",
"{",
"}",
"for",
"fa",
"in",
"fastas",
":",
"for",
"seq",
"in",
"parse_fasta",
"(",
"fa",
")",
":",
"s",
"=",
"seq",
"[",
"0",
"]",
".",
"split",
"(",
"'>'",
",",
"1",
")",
"[",
"1",
"]",
".",
"split",
"(",
")",
"[",
"0",
"]",
"s2b",
"[",
"s",
"]",
"=",
"fa",
".",
"rsplit",
"(",
"'/'",
",",
"1",
")",
"[",
"-",
"1",
"]",
".",
"rsplit",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
"return",
"s2b"
] | convert fastas to s2b dictionary | [
"convert",
"fastas",
"to",
"s2b",
"dictionary"
] | 83b2566b3a5745437ec651cd6cafddd056846240 | https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/genome_coverage.py#L73-L82 | train |
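Here the bin name is derived from the fasta file name itself (basename with the final extension stripped); a sketch with made-up paths:

    s2b = fa2s2b(['bins/bin1.fa', 'bins/bin2.fa'])
    # scaffolds named in bins/bin1.fa map to 'bin1', those in bins/bin2.fa to 'bin2'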
smdabdoub/phylotoast | bin/filter_ambiguity.py | filter_ambiguity | def filter_ambiguity(records, percent=0.5): # , repeats=6)
"""
Filters out sequences with too much ambiguity as defined by the method
parameters.
:type records: list
:param records: A list of sequences
:type repeats: int
:param repeats: Defines the number of repeated N that trigger truncating a
sequence.
:type percent: float
:param percent: Defines the overall percentage of N in a sequence that
will cause the sequence to be filtered out.
"""
seqs = []
# Ns = ''.join(['N' for _ in range(repeats)])
count = 0
for record in records:
if record.seq.count('N')/float(len(record)) < percent:
# pos = record.seq.find(Ns)
# if pos >= 0:
# record.seq = Seq(str(record.seq)[:pos])
seqs.append(record)
count += 1
return seqs, count | python | def filter_ambiguity(records, percent=0.5): # , repeats=6)
"""
Filters out sequences with too much ambiguity as defined by the method
parameters.
:type records: list
:param records: A list of sequences
:type repeats: int
:param repeats: Defines the number of repeated N that trigger truncating a
sequence.
:type percent: float
:param percent: Defines the overall percentage of N in a sequence that
will cause the sequence to be filtered out.
"""
seqs = []
# Ns = ''.join(['N' for _ in range(repeats)])
count = 0
for record in records:
if record.seq.count('N')/float(len(record)) < percent:
# pos = record.seq.find(Ns)
# if pos >= 0:
# record.seq = Seq(str(record.seq)[:pos])
seqs.append(record)
count += 1
return seqs, count | [
"def",
"filter_ambiguity",
"(",
"records",
",",
"percent",
"=",
"0.5",
")",
":",
"# , repeats=6)",
"seqs",
"=",
"[",
"]",
"# Ns = ''.join(['N' for _ in range(repeats)])",
"count",
"=",
"0",
"for",
"record",
"in",
"records",
":",
"if",
"record",
".",
"seq",
".",
"count",
"(",
"'N'",
")",
"/",
"float",
"(",
"len",
"(",
"record",
")",
")",
"<",
"percent",
":",
"# pos = record.seq.find(Ns)",
"# if pos >= 0:",
"# record.seq = Seq(str(record.seq)[:pos])",
"seqs",
".",
"append",
"(",
"record",
")",
"count",
"+=",
"1",
"return",
"seqs",
",",
"count"
] | Filters out sequences with too much ambiguity as defined by the method
parameters.
:type records: list
:param records: A list of sequences
:type repeats: int
:param repeats: Defines the number of repeated N that trigger truncating a
sequence.
:type percent: float
:param percent: Defines the overall percentage of N in a sequence that
will cause the sequence to be filtered out. | [
"Filters",
"out",
"sequences",
"with",
"too",
"much",
"ambiguity",
"as",
"defined",
"by",
"the",
"method",
"parameters",
"."
] | 0b74ef171e6a84761710548501dfac71285a58a3 | https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/filter_ambiguity.py#L16-L41 | train |
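Usage sketch with Biopython's SeqIO (the input file name is hypothetical):

    from Bio import SeqIO

    records = SeqIO.parse('reads.fasta', 'fasta')
    kept, n_kept = filter_ambiguity(records, percent=0.5)
    print('%d records kept' % n_kept)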
mkouhei/bootstrap-py | bootstrap_py/pypi.py | package_existent | def package_existent(name):
"""Search package.
* :class:`bootstrap_py.exceptions.Conflict` exception occurs
    when the user-specified name already exists.
* :class:`bootstrap_py.exceptions.BackendFailure` exception occurs
when PyPI service is down.
:param str name: package name
"""
try:
response = requests.get(PYPI_URL.format(name))
if response.ok:
msg = ('[error] "{0}" is registered already in PyPI.\n'
'\tSpecify another package name.').format(name)
raise Conflict(msg)
except (socket.gaierror,
Timeout,
ConnectionError,
HTTPError) as exc:
raise BackendFailure(exc) | python | def package_existent(name):
"""Search package.
* :class:`bootstrap_py.exceptions.Conflict` exception occurs
    when the user-specified name already exists.
* :class:`bootstrap_py.exceptions.BackendFailure` exception occurs
when PyPI service is down.
:param str name: package name
"""
try:
response = requests.get(PYPI_URL.format(name))
if response.ok:
msg = ('[error] "{0}" is registered already in PyPI.\n'
'\tSpecify another package name.').format(name)
raise Conflict(msg)
except (socket.gaierror,
Timeout,
ConnectionError,
HTTPError) as exc:
raise BackendFailure(exc) | [
"def",
"package_existent",
"(",
"name",
")",
":",
"try",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"PYPI_URL",
".",
"format",
"(",
"name",
")",
")",
"if",
"response",
".",
"ok",
":",
"msg",
"=",
"(",
"'[error] \"{0}\" is registered already in PyPI.\\n'",
"'\\tSpecify another package name.'",
")",
".",
"format",
"(",
"name",
")",
"raise",
"Conflict",
"(",
"msg",
")",
"except",
"(",
"socket",
".",
"gaierror",
",",
"Timeout",
",",
"ConnectionError",
",",
"HTTPError",
")",
"as",
"exc",
":",
"raise",
"BackendFailure",
"(",
"exc",
")"
] | Search package.
* :class:`bootstrap_py.exceptions.Conflict` exception occurs
    when the user-specified name already exists.
* :class:`bootstrap_py.exceptions.BackendFailure` exception occurs
when PyPI service is down.
:param str name: package name | [
"Search",
"package",
"."
] | 95d56ed98ef409fd9f019dc352fd1c3711533275 | https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/pypi.py#L12-L33 | train |
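A usage sketch; the exception classes are the ones named in the docstring above:

    from bootstrap_py.exceptions import Conflict, BackendFailure

    try:
        package_existent('requests')  # a name already on PyPI raises Conflict
    except Conflict as exc:
        print(exc)
    except BackendFailure as exc:
        print('PyPI lookup failed: %s' % exc)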