'__init__: This has to be present'
def __init__(self):
self.a = 1

'XXX22: This has to be present'
def XXX22():
pass

def XXX11():
pass
'Remove recursive references to allow the garbage collector to collect this object.'
def cleanup(self):
for dict in (self.rule2func, self.rules, self.rule2name):
    for i in dict.keys():
        dict[i] = None
for i in dir(self):
    setattr(self, i, None)
'Disassemble a code object, returning a list of \'Token\'. The main part of this procedure is modelled after dis.disassemble().'
def disassemble(self, co, classname=None, deob=0):
rv = []
customize = {}
Token = self.Token
self.code = array('B', co.co_code)
linestarts = list(dis.findlinestarts(co))
varnames = list(co.co_varnames)
if deob:
    linestarts = self.deobfuscate(co, linestarts, varnames)
code = self.code
n = len(code)
self.prev = [0]
for i in self.op_range(0, n):
    op = code[i]
    self.prev.append(i)
    if op >= HAVE_ARGUMENT:
        self.prev.append(i)
        self.prev.append(i)
self.lines = []
linetuple = namedtuple('linetuple', ['l_no', 'next'])
j = 0
linestartoffsets = {a for (a, _) in linestarts}
(prev_start_byte, prev_line_no) = linestarts[0]
for (start_byte, line_no) in linestarts[1:]:
    while j < start_byte:
        self.lines.append(linetuple(prev_line_no, start_byte))
        j += 1
    last_op = code[self.prev[start_byte]]
    (prev_start_byte, prev_line_no) = (start_byte, line_no)
while j < n:
    self.lines.append(linetuple(prev_line_no, n))
    j += 1
if classname:
    classname = '_' + classname.lstrip('_') + '__'

    def unmangle(name):
        if name.startswith(classname) and name[-2:] != '__':
            return name[len(classname) - 2:]
        return name

    free = [unmangle(name) for name in (co.co_cellvars + co.co_freevars)]
    names = [unmangle(name) for name in co.co_names]
    varnames = [unmangle(name) for name in varnames]
else:
    free = co.co_cellvars + co.co_freevars
    names = co.co_names
self.load_asserts = set()
for i in self.op_range(0, n):
    if code[i] == PJIT and code[i + 3] == LOAD_GLOBAL:
        if names[code[i + 4] + 256 * code[i + 5]] == 'AssertionError':
            self.load_asserts.add(i + 3)
cf = self.find_jump_targets(code)
last_stmt = self.next_stmt[0]
i = self.next_stmt[last_stmt]
replace = {}
while i < n - 1:
    if self.lines[last_stmt].next > i:
        if code[last_stmt] == PRINT_ITEM:
            if code[i] == PRINT_ITEM:
                replace[i] = 'PRINT_ITEM_CONT'
            elif code[i] == PRINT_NEWLINE:
                replace[i] = 'PRINT_NEWLINE_CONT'
    last_stmt = i
    i = self.next_stmt[i]
imports = self.all_instr(0, n, (IMPORT_NAME, IMPORT_FROM, IMPORT_STAR))
if len(imports) > 1:
    last_import = imports[0]
    for i in imports[1:]:
        if self.lines[last_import].next > i:
            if code[last_import] == IMPORT_NAME == code[i]:
                replace[i] = 'IMPORT_NAME_CONT'
        last_import = i
extended_arg = 0
for offset in self.op_range(0, n):
    if offset in cf:
        k = 0
        for j in cf[offset]:
            rv.append(Token('COME_FROM', None, repr(j),
                            offset='%s_%d' % (offset, k)))
            k += 1
    op = code[offset]
    opname = dis.opname[op]
    oparg = None
    pattr = None
    if op >= HAVE_ARGUMENT:
        oparg = code[offset + 1] + code[offset + 2] * 256 + extended_arg
        extended_arg = 0
        if op == dis.EXTENDED_ARG:
            extended_arg = oparg * 65536L
            continue
        if op in dis.hasconst:
            const = co.co_consts[oparg]
            if type(const) == types.CodeType:
                oparg = const
                if const.co_name == '<lambda>':
                    assert opname == 'LOAD_CONST'
                    opname = 'LOAD_LAMBDA'
                elif const.co_name == '<genexpr>':
                    opname = 'LOAD_GENEXPR'
                elif const.co_name == '<dictcomp>':
                    opname = 'LOAD_DICTCOMP'
                elif const.co_name == '<setcomp>':
                    opname = 'LOAD_SETCOMP'
                pattr = '<code_object ' + const.co_name + '>'
            else:
                pattr = const
        elif op in dis.hasname:
            pattr = names[oparg]
        elif op in dis.hasjrel:
            pattr = repr(offset + 3 + oparg)
        elif op in dis.hasjabs:
            pattr = repr(oparg)
        elif op in dis.haslocal:
            pattr = varnames[oparg]
        elif op in dis.hascompare:
            pattr = dis.cmp_op[oparg]
        elif op in dis.hasfree:
            pattr = free[oparg]
    if op in (BUILD_LIST, BUILD_TUPLE, BUILD_SET, BUILD_SLICE,
              UNPACK_SEQUENCE, MAKE_FUNCTION, CALL_FUNCTION, MAKE_CLOSURE,
              CALL_FUNCTION_VAR, CALL_FUNCTION_KW, CALL_FUNCTION_VAR_KW,
              DUP_TOPX, RAISE_VARARGS):
        if op == BUILD_TUPLE and code[self.prev[offset]] == LOAD_CLOSURE:
            continue
        else:
            opname = '%s_%d' % (opname, oparg)
            if op != BUILD_SLICE:
                customize[opname] = oparg
    elif op == JA:
        target = self.get_target(offset)
        if target < offset:
            if (offset in self.stmts
                    and code[offset + 3] not in (END_FINALLY, POP_BLOCK)
                    and offset not in self.not_continue):
                opname = 'CONTINUE'
            else:
                opname = 'JUMP_BACK'
    elif op == LOAD_GLOBAL:
        if offset in self.load_asserts:
            opname = 'LOAD_ASSERT'
    elif op == RETURN_VALUE:
        if offset in self.return_end_ifs:
            opname = 'RETURN_END_IF'
    if offset not in replace:
        rv.append(Token(opname, oparg, pattr, offset,
                        linestart=(offset in linestartoffsets)))
    else:
        rv.append(Token(replace[offset], oparg, pattr, offset,
                        linestart=(offset in linestartoffsets)))
if self.showasm:
    out = self.out
    for t in rv:
        print >>out, t
    print >>out
return (rv, customize)
'Find the first <instr> in the block from start to end. <instr> is any python bytecode instruction or a list of opcodes. If <instr> is an opcode with a target (like a jump), a target destination can be specified which must match precisely if exact is True; if exact is False, the instruction which has a target closest to <target> will be returned. Return index to it or None if not found.'
def first_instr(self, start, end, instr, target=None, exact=True):
code = self.code
assert (start >= 0) and (end <= len(code))
try:
    None in instr
except:
    instr = [instr]
pos = None
distance = len(code)
for i in self.op_range(start, end):
    op = code[i]
    if op in instr:
        if target is None:
            return i
        dest = self.get_target(i, op)
        if dest == target:
            return i
        elif not exact:
            _distance = abs(target - dest)
            if _distance < distance:
                distance = _distance
                pos = i
return pos
'Find the last <instr> in the block from start to end. <instr> is any python bytecode instruction or a list of opcodes. If <instr> is an opcode with a target (like a jump), a target destination can be specified which must match precisely if exact is True; if exact is False, the instruction which has a target closest to <target> will be returned. Return index to it or None if not found.'
def last_instr(self, start, end, instr, target=None, exact=True):
code = self.code
if not ((start >= 0) and (end <= len(code))):
    return None
try:
    None in instr
except:
    instr = [instr]
pos = None
distance = len(code)
for i in self.op_range(start, end):
    op = code[i]
    if op in instr:
        if target is None:
            pos = i
        else:
            dest = self.get_target(i, op)
            if dest == target:
                distance = 0
                pos = i
            elif not exact:
                _distance = abs(target - dest)
                if _distance <= distance:
                    distance = _distance
                    pos = i
return pos
'Find all <instr> in the block from start to end. <instr> is any python bytecode instruction or a list of opcodes. If <instr> is an opcode with a target (like a jump), a target destination can be specified which must match precisely. Return a list with indexes to them or [] if none found.'
def all_instr(self, start, end, instr, target=None, include_beyond_target=False):
code = self.code
assert (start >= 0) and (end <= len(code))
try:
    None in instr
except:
    instr = [instr]
result = []
for i in self.op_range(start, end):
    op = code[i]
    if op in instr:
        if target is None:
            result.append(i)
        else:
            t = self.get_target(i, op)
            if include_beyond_target and (t >= target):
                result.append(i)
            elif t == target:
                result.append(i)
return result
'Find all <instr> in the block from start to end. <instr> is any python bytecode instruction or a list of opcodes. If <instr> is an opcode with a target (like a jump), a target destination can be specified which must match precisely. Return a list with indexes to them or [] if none found.'
def rem_or(self, start, end, instr, target=None, include_beyond_target=False):
code = self.code
assert (start >= 0) and (end <= len(code))
try:
    None in instr
except:
    instr = [instr]
result = []
for i in self.op_range(start, end):
    op = code[i]
    if op in instr:
        if target is None:
            result.append(i)
        else:
            t = self.get_target(i, op)
            if include_beyond_target and (t >= target):
                result.append(i)
            elif t == target:
                result.append(i)
pjits = self.all_instr(start, end, PJIT)
filtered = []
for pjit in pjits:
    tgt = self.get_target(pjit) - 3
    for i in result:
        if (i <= pjit) or (i >= tgt):
            filtered.append(i)
    result = filtered
    filtered = []
return result
'Return the next jump that was generated by an except SomeException: construct in a try...except...else clause or None if not found.'
def next_except_jump(self, start):
if self.code[start] == DUP_TOP:
    except_match = self.first_instr(start, len(self.code), POP_JUMP_IF_FALSE)
    if except_match:
        jmp = self.prev[self.get_target(except_match)]
        self.ignore_if.add(except_match)
        self.not_continue.add(jmp)
        return jmp
count_END_FINALLY = 0
count_SETUP_ = 0
for i in self.op_range(start, len(self.code)):
    op = self.code[i]
    if op == END_FINALLY:
        if count_END_FINALLY == count_SETUP_:
            assert self.code[self.prev[i]] in (JA, JF, RETURN_VALUE)
            self.not_continue.add(self.prev[i])
            return self.prev[i]
        count_END_FINALLY += 1
    elif op in (SETUP_EXCEPT, SETUP_WITH, SETUP_FINALLY):
        count_SETUP_ += 1
'Restrict pos to parent boundaries.'
def restrict_to_parent(self, target, parent):
if not (parent['start'] < target < parent['end']):
    target = parent['end']
return target
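A tiny standalone check makes the clamp easier to see; this is a hedged sketch with a made-up parent struct dict, not part of the dataset:

# Hypothetical usage sketch of the parent-boundary clamp above.
parent = {'type': 'root', 'start': 0, 'end': 100}

def restrict_to_parent(target, parent):
    if not (parent['start'] < target < parent['end']):
        target = parent['end']
    return target

assert restrict_to_parent(50, parent) == 50    # inside the parent: unchanged
assert restrict_to_parent(150, parent) == 100  # outside: clamped to parent['end']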
'Detect structures and their boundaries to fix optimized jumps in python2.3+'
def detect_structure(self, pos, op=None):
code = self.code
if op is None:
    op = code[pos]
# detect the innermost parent structure containing pos
parent = self.structs[0]
start = parent['start']
end = parent['end']
for s in self.structs:
    _start = s['start']
    _end = s['end']
    if (_start <= pos < _end) and (_start >= start) and (_end <= end):
        start = _start
        end = _end
        parent = s
origStructCount = len(self.structs)
if op == SETUP_LOOP:
    start = pos + 3
    target = self.get_target(pos, op)
    end = self.restrict_to_parent(target, parent)
    if target != end:
        self.fixed_jumps[pos] = end
    (line_no, next_line_byte) = self.lines[pos]
    jump_back = self.last_instr(start, end, JA, next_line_byte, False)
    if (jump_back and jump_back != self.prev[end]
            and code[jump_back + 3] in (JA, JF)):
        if (code[self.prev[end]] == RETURN_VALUE
                or (code[self.prev[end]] == POP_BLOCK
                    and code[self.prev[self.prev[end]]] == RETURN_VALUE)):
            jump_back = None
    if not jump_back:
        # loop suite ends in a return
        jump_back = self.last_instr(start, end, RETURN_VALUE) + 1
        if not jump_back:
            return
        if code[self.prev[next_line_byte]] not in (PJIF, PJIT):
            loop_type = 'for'
        else:
            loop_type = 'while'
            self.ignore_if.add(self.prev[next_line_byte])
        target = next_line_byte
        end = jump_back + 3
    else:
        if self.get_target(jump_back) >= next_line_byte:
            jump_back = self.last_instr(start, end, JA, start, False)
        if end > jump_back + 4 and code[end] in (JF, JA):
            if code[jump_back + 4] in (JA, JF):
                if self.get_target(jump_back + 4) == self.get_target(end):
                    self.fixed_jumps[pos] = jump_back + 4
                    end = jump_back + 4
        elif target < pos:
            self.fixed_jumps[pos] = jump_back + 4
            end = jump_back + 4
        target = self.get_target(jump_back, JA)
        if code[target] in (FOR_ITER, GET_ITER):
            loop_type = 'for'
        else:
            loop_type = 'while'
            test = self.prev[next_line_byte]
            if test == pos:
                loop_type = 'while 1'
            else:
                self.ignore_if.add(test)
                test_target = self.get_target(test)
                if test_target > (jump_back + 3):
                    jump_back = test_target
        self.not_continue.add(jump_back)
    self.loops.append(target)
    self.structs.append({'type': loop_type + '-loop',
                         'start': target,
                         'end': jump_back})
    if jump_back + 3 != end:
        self.structs.append({'type': loop_type + '-else',
                             'start': jump_back + 3,
                             'end': end})
elif op == SETUP_EXCEPT:
    start = pos + 3
    target = self.get_target(pos, op)
    end = self.restrict_to_parent(target, parent)
    if target != end:
        self.fixed_jumps[pos] = end
    # add the try block
    self.structs.append({'type': 'try',
                         'start': start,
                         'end': end - 4})
    # now isolate the except and else blocks
    end_else = start_else = self.get_target(self.prev[end])
    # add the except blocks
    i = end
    while self.code[i] != END_FINALLY:
        jmp = self.next_except_jump(i)
        if self.code[jmp] == RETURN_VALUE:
            self.structs.append({'type': 'except',
                                 'start': i,
                                 'end': jmp + 1})
            i = jmp + 1
        else:
            if self.get_target(jmp) != start_else:
                end_else = self.get_target(jmp)
            if self.code[jmp] == JF:
                self.fixed_jumps[jmp] = -1
            self.structs.append({'type': 'except',
                                 'start': i,
                                 'end': jmp})
            i = jmp + 3
    # add the try-else block
    if end_else != start_else:
        r_end_else = self.restrict_to_parent(end_else, parent)
        self.structs.append({'type': 'try-else',
                             'start': i + 1,
                             'end': r_end_else})
        self.fixed_jumps[i] = r_end_else
    else:
        self.fixed_jumps[i] = i + 1
elif op in (PJIF, PJIT):
    start = pos + 3
    target = self.get_target(pos, op)
    rtarget = self.restrict_to_parent(target, parent)
    pre = self.prev
    if target != rtarget and parent['type'] == 'and/or':
        self.fixed_jumps[pos] = rtarget
        return
    # does this jump to right after another conditional jump?
    # if so, it's part of a larger conditional
    if (code[pre[target]] in (JUMP_IF_FALSE_OR_POP, JUMP_IF_TRUE_OR_POP,
                              PJIF, PJIT)) and (target > pos):
        self.fixed_jumps[pos] = pre[target]
        self.structs.append({'type': 'and/or',
                             'start': start,
                             'end': pre[target]})
        return
    if op == PJIF:
        match = self.rem_or(start, self.next_stmt[pos], PJIF, target)
        match = self.remove_mid_line_ifs(match)
        if match:
            if (code[pre[rtarget]] in (JF, JA)
                    and pre[rtarget] not in self.stmts
                    and self.restrict_to_parent(self.get_target(pre[rtarget]), parent) == rtarget):
                if (code[pre[pre[rtarget]]] == JA
                        and self.remove_mid_line_ifs([pos])
                        and target == self.get_target(pre[pre[rtarget]])
                        and (pre[pre[rtarget]] not in self.stmts
                             or self.get_target(pre[pre[rtarget]]) > pre[pre[rtarget]])
                        and 1 == len(self.remove_mid_line_ifs(
                            self.rem_or(start, pre[pre[rtarget]], (PJIF, PJIT), target)))):
                    pass
                elif (code[pre[pre[rtarget]]] == RETURN_VALUE
                      and self.remove_mid_line_ifs([pos])
                      and 1 == len(set(self.remove_mid_line_ifs(
                              self.rem_or(start, pre[pre[rtarget]], (PJIF, PJIT), target)))
                          | set(self.remove_mid_line_ifs(
                              self.rem_or(start, pre[pre[rtarget]], (PJIF, PJIT, JA),
                                          pre[rtarget], True))))):
                    pass
                else:
                    fix = None
                    jump_ifs = self.all_instr(start, self.next_stmt[pos], PJIF)
                    last_jump_good = True
                    for j in jump_ifs:
                        if target == self.get_target(j):
                            if self.lines[j].next == j + 3 and last_jump_good:
                                fix = j
                                break
                        else:
                            last_jump_good = False
                    self.fixed_jumps[pos] = fix or match[-1]
                    return
            else:
                self.fixed_jumps[pos] = match[-1]
                return
    else:  # op == PJIT
        if (pos + 3) in self.load_asserts:
            if code[pre[rtarget]] == RAISE_VARARGS:
                return
            self.load_asserts.remove(pos + 3)
        next = self.next_stmt[pos]
        if pre[next] == pos:
            pass
        elif code[next] in (JF, JA) and target == self.get_target(next):
            if code[pre[next]] == PJIF:
                if (code[next] == JF or target != rtarget
                        or code[pre[pre[rtarget]]] not in (JA, RETURN_VALUE)):
                    self.fixed_jumps[pos] = pre[next]
                    return
        elif code[next] == JA and code[target] in (JA, JF):
            next_target = self.get_target(next)
            if self.get_target(target) == next_target:
                self.fixed_jumps[pos] = pre[next]
                return
            elif (code[next_target] in (JA, JF)
                  and self.get_target(next_target) == self.get_target(target)):
                self.fixed_jumps[pos] = pre[next]
                return
    # don't add a struct for a test that's already been taken care of
    if pos in self.ignore_if:
        return
    if (code[pre[rtarget]] == JA and pre[rtarget] in self.stmts
            and pre[rtarget] != pos and pre[pre[rtarget]] != pos):
        if code[rtarget] == JA and code[rtarget + 3] == POP_BLOCK:
            if code[pre[pre[rtarget]]] != JA:
                pass
            elif self.get_target(pre[pre[rtarget]]) != target:
                pass
            else:
                rtarget = pre[rtarget]
        else:
            rtarget = pre[rtarget]
    # does the if jump just beyond a jump op? then this is probably an if statement
    if code[pre[rtarget]] in (JA, JF):
        if_end = self.get_target(pre[rtarget])
        # is this a loop rather than an if?
        if if_end < pre[rtarget] and code[pre[if_end]] == SETUP_LOOP:
            if if_end > start:
                return
        end = self.restrict_to_parent(if_end, parent)
        self.structs.append({'type': 'if-then',
                             'start': start,
                             'end': pre[rtarget]})
        self.not_continue.add(pre[rtarget])
        if rtarget < end:
            self.structs.append({'type': 'if-else',
                                 'start': rtarget,
                                 'end': end})
    elif code[pre[rtarget]] == RETURN_VALUE:
        self.structs.append({'type': 'if-then',
                             'start': start,
                             'end': rtarget})
        self.return_end_ifs.add(pre[rtarget])
elif op in (JUMP_IF_FALSE_OR_POP, JUMP_IF_TRUE_OR_POP):
    target = self.get_target(pos, op)
    self.fixed_jumps[pos] = self.restrict_to_parent(target, parent)
'Detect all offsets in a byte code which are jump targets. Return the list of offsets. This procedure is modelled after dis.findlabels(), but here for each target the number of jumps is counted.'
def find_jump_targets(self, code):
hasjrel = dis.hasjrel
hasjabs = dis.hasjabs
n = len(code)
self.structs = [{'type': 'root', 'start': 0, 'end': n - 1}]
self.loops = []
self.fixed_jumps = {}
self.ignore_if = set()
self.build_stmt_indices()
self.not_continue = set()
self.return_end_ifs = set()
targets = {}
for i in self.op_range(0, n):
    op = code[i]
    self.detect_structure(i, op)
    if op >= HAVE_ARGUMENT:
        label = self.fixed_jumps.get(i)
        oparg = code[i + 1] + code[i + 2] * 256
        if label is None:
            if (op in hasjrel) and (op != FOR_ITER):
                label = i + 3 + oparg
            elif op in hasjabs:
                if op in (JUMP_IF_FALSE_OR_POP, JUMP_IF_TRUE_OR_POP):
                    if oparg > i:
                        label = oparg
        if (label is not None) and (label != -1):
            targets[label] = targets.get(label, []) + [i]
    elif (op == END_FINALLY) and (i in self.fixed_jumps):
        label = self.fixed_jumps[i]
        targets[label] = targets.get(label, []) + [i]
return targets
'exec_stmt ::= expr exprlist DUP_TOP EXEC_STMT
exec_stmt ::= expr exprlist EXEC_STMT'
def n_exec_stmt(self, node):
self.write(self.indent, 'exec ')
self.preorder(node[0])
if node[1][0] != NONE:
    sep = ' in '
    for subnode in node[1]:
        self.write(sep)
        sep = ', '
        self.preorder(subnode)
self.print_()
self.prune()
'prettyprint a mapexpr. \'mapexpr\' is something like k = {\'a\': 1, \'b\': 42}'
def n_mapexpr(self, node):
p = self.prec
self.prec = 100
assert node[-1] == 'kvlist'
node = node[-1]
self.indentMore(INDENT_PER_LEVEL)
line_seperator = ',\n' + self.indent
sep = INDENT_PER_LEVEL[:-1]
self.write('{')
for kv in node:
    assert kv in ('kv', 'kv2', 'kv3')
    if kv == 'kv':
        name = self.traverse(kv[-2], indent='')
        value = self.traverse(kv[1], indent=self.indent + (len(name) + 2) * ' ')
    elif kv == 'kv2':
        name = self.traverse(kv[1], indent='')
        value = self.traverse(kv[-3], indent=self.indent + (len(name) + 2) * ' ')
    elif kv == 'kv3':
        name = self.traverse(kv[-2], indent='')
        value = self.traverse(kv[0], indent=self.indent + (len(name) + 2) * ' ')
    self.write(sep, name, ': ', value)
    sep = line_seperator
self.write('}')
self.indentLess(INDENT_PER_LEVEL)
self.prec = p
self.prune()
'prettyprint a list or tuple'
def n_build_list(self, node):
p = self.prec
self.prec = 100
lastnode = node.pop()
lastnodetype = lastnode.type
if lastnodetype.startswith('BUILD_LIST'):
    self.write('[')
    endchar = ']'
elif lastnodetype.startswith('BUILD_TUPLE'):
    self.write('(')
    endchar = ')'
elif lastnodetype.startswith('BUILD_SET'):
    self.write('{')
    endchar = '}'
elif lastnodetype.startswith('ROT_TWO'):
    self.write('(')
    endchar = ')'
else:
    # raising a bare string is invalid; raise a real exception instead
    raise TypeError('Internal Error: n_build_list expects list or tuple')
flat_elems = []
for elem in node:
    if elem == 'expr1024':
        for subelem in elem:
            for subsubelem in subelem:
                flat_elems.append(subsubelem)
    elif elem == 'expr32':
        for subelem in elem:
            flat_elems.append(subelem)
    else:
        flat_elems.append(elem)
self.indentMore(INDENT_PER_LEVEL)
if lastnode.attr > 3:
    line_separator = ',\n' + self.indent
else:
    line_separator = ', '
sep = INDENT_PER_LEVEL[:-1]
for elem in flat_elems:
    if elem == 'ROT_THREE':
        continue
    assert elem == 'expr'
    value = self.traverse(elem)
    self.write(sep, value)
    sep = line_separator
if lastnode.attr == 1 and lastnodetype.startswith('BUILD_TUPLE'):
    self.write(',')
self.write(endchar)
self.indentLess(INDENT_PER_LEVEL)
self.prec = p
self.prune()
'Special handling for opcodes that take a variable number of arguments -- we add a new entry for each in TABLE_R.'
def customize(self, customize):
for (k, v) in customize.items():
    if TABLE_R.has_key(k):
        continue
    op = k[:k.rfind('_')]
    if op == 'CALL_FUNCTION':
        TABLE_R[k] = ('%c(%P)', 0, (1, -1, ', ', 100))
    elif op in ('CALL_FUNCTION_VAR', 'CALL_FUNCTION_VAR_KW', 'CALL_FUNCTION_KW'):
        if v == 0:
            str = '%c(%C'
            p2 = (0, 0, None)
        else:
            str = '%c(%C, '
            p2 = (1, -2, ', ')
        if op == 'CALL_FUNCTION_VAR':
            str += '*%c)'
            entry = (str, 0, p2, -2)
        elif op == 'CALL_FUNCTION_KW':
            str += '**%c)'
            entry = (str, 0, p2, -2)
        else:
            str += '*%c, **%c)'
            if p2[2]:
                p2 = (1, -3, ', ')
            entry = (str, 0, p2, -3, -2)
        TABLE_R[k] = entry
'If the name of the formal parameter starts with dot, it\'s a tuple parameter, like this: def MyFunc(xx, (a,b,c), yy): print a, b*2, c*42 -- In byte-code, the whole tuple is assigned to parameter \'.1\' and then the tuple gets unpacked to \'a\', \'b\' and \'c\'. Since identifiers starting with a dot are illegal in Python, we can search for the byte-code equivalent to \'(a,b,c) = .1\''
def get_tuple_parameter(self, ast, name):
assert ast == 'stmts'
for i in range(len(ast)):
    assert ast[i][0] == 'stmt'
    node = ast[i][0][0]
    if node == 'assign' and node[0] == ASSIGN_TUPLE_PARAM(name):
        del ast[i]
        assert node[1] == 'designator'
        return '(' + self.traverse(node[1]) + ')'
# the original raised a bare string with a broken format; fixed here
raise ValueError("Can't find tuple parameter %s" % name)
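The '.1' trick described above is easiest to see in a Python 2 snippet (tuple parameters were removed in Python 3 by PEP 3113); this example is illustrative, not taken from the dataset:

# Python 2 only: a tuple parameter is bound to the hidden name '.1' and
# then unpacked, which is the pattern get_tuple_parameter() searches for.
def MyFunc(xx, (a, b, c), yy):
    print a, b * 2, c * 42

# The compiler treats the def roughly as:
#   def MyFunc(xx, .1, yy):
#       (a, b, c) = .1
MyFunc(1, (2, 3, 4), 5)   # prints: 2 6 168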
'Dump function definition, doc string, and function body.'
def make_function(self, node, isLambda, nested=1):
def build_param(ast, name, default):
    '''build parameters:
        - handle defaults
        - handle format tuple parameters
    '''
    if name.startswith('.'):
        # replace the hidden .x name with a tuple of the correct size
        name = self.get_tuple_parameter(ast, name)
    if default:
        if self.showast:
            print '--', name
            print default
            print '--'
        result = '%s = %s' % (name, self.traverse(default, indent=''))
        if result[-2:] == '= ':
            result += 'None'
        return result
    else:
        return name

defparams = node[:node[-1].attr]
code = node[-2].attr
assert type(code) == CodeType
code = Code(code, self.scanner, self.currentclass)
argc = code.co_argcount
paramnames = list(code.co_varnames[:argc])
# defaults belong to the last n parameters, hence the reversal
paramnames.reverse()
defparams.reverse()
try:
    ast = self.build_ast(code._tokens, code._customize,
                         isLambda=isLambda,
                         noneInNames=('None' in code.co_names))
except ParserError as p:
    self.write(str(p))
    self.ERROR = p
    return
params = []
for (name, default) in map(lambda a, b: (a, b), paramnames, defparams):
    params.append(build_param(ast, name, default))
params.reverse()  # back to correct order
if 4 & code.co_flags:  # CO_VARARGS: variable number of args
    params.append('*%s' % code.co_varnames[argc])
    argc += 1
if 8 & code.co_flags:  # CO_VARKEYWORDS: keyword args
    params.append('**%s' % code.co_varnames[argc])
    argc += 1
indent = self.indent
if isLambda:
    self.write('lambda ', ', '.join(params), ': ')
else:
    self.print_('(', ', '.join(params), '):')
if len(code.co_consts) > 0 and code.co_consts[0] != None:
    # docstring exists, dump it
    self.print_docstring(indent, code.co_consts[0])
code._tokens = None  # save memory
assert ast == 'stmts'
all_globals = find_all_globals(ast, set())
for g in (all_globals & self.mod_globs) | find_globals(ast, set()):
    self.print_(self.indent, 'global ', g)
self.mod_globs -= all_globals
rn = ('None' in code.co_names) and not find_none(ast)
self.gen_source(ast, code._customize, isLambda=isLambda, returnNone=rn)
code._tokens = None
code._customize = None  # save memory
'Dump class definition, doc string and class body.'
def build_class(self, code):
assert type(code) == CodeType
code = Code(code, self.scanner, self.currentclass)
indent = self.indent
ast = self.build_ast(code._tokens, code._customize)
code._tokens = None  # save memory
assert ast == 'stmts'
if ast[0][0] == NAME_MODULE:
    del ast[0]
if (code.co_consts and code.co_consts[0] != None
        and ast[0][0] == ASSIGN_DOC_STRING(code.co_consts[0])):
    self.print_docstring(indent, code.co_consts[0])
    self.print_()
    del ast[0]
if ast[-1][0] == RETURN_LOCALS:
    del ast[-1]
for g in find_globals(ast, set()):
    self.print_(indent, 'global ', g)
self.gen_source(ast, code._customize)
code._tokens = None
code._customize = None  # save memory
'convert AST to source code'
def gen_source(self, ast, customize, isLambda=0, returnNone=False):
rn = self.return_none
self.return_none = returnNone
# if the code would be empty, append 'pass'
if len(ast) == 0:
    self.print_(self.indent, 'pass')
else:
    self.customize(customize)
    if isLambda:
        self.write(self.traverse(ast, isLambda=isLambda))
    else:
        self.print_(self.traverse(ast, isLambda=isLambda))
self.return_none = rn
'Places new Gmail notifications in the Notifier\'s queue.'
def handleEmailNotifications(self, lastDate):
emails = Gmail.fetchUnreadEmails(self.profile, since=lastDate)
if emails:
    lastDate = Gmail.getMostRecentDate(emails)

def styleEmail(e):
    return 'New email from %s.' % Gmail.getSender(e)

for e in emails:
    self.q.put(styleEmail(e))
return lastDate
'Returns a notification. Note that this function is consuming.'
def getNotification(self):
try:
    notif = self.q.get(block=False)
    return notif
except Queue.Empty:
    return None
'Return a list of notifications in chronological order. Note that this function is consuming, so consecutive calls will yield different results.'
def getAllNotifications(self):
notifs = []
notif = self.getNotification()
while notif:
    notifs.append(notif)
    notif = self.getNotification()
return notifs
'Calculates a revision from phrases by using the SHA1 hash function. Arguments: phrases -- a list of phrases Returns: A revision string for given phrases.'
@classmethod
def phrases_to_revision(cls, phrases):
sorted_phrases = sorted(phrases)
joined_phrases = '\n'.join(sorted_phrases)
sha1 = hashlib.sha1()
sha1.update(joined_phrases)
return sha1.hexdigest()
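Because the phrases are sorted before hashing, the revision is independent of input order; a quick hedged demonstration (phrase values are made up):

import hashlib

def phrases_to_revision(phrases):
    return hashlib.sha1('\n'.join(sorted(phrases))).hexdigest()

# The same phrase set in a different order yields the same revision.
assert phrases_to_revision(['TIME', 'MUSIC']) == phrases_to_revision(['MUSIC', 'TIME'])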
'Initializes a new Vocabulary instance. Optional Arguments: name -- (optional) the name of the vocabulary (Default: \'default\') path -- (optional) the path in which the vocabulary exists or will be created (Default: \'.\')'
def __init__(self, name='default', path='.'):
self.name = name
self.path = os.path.abspath(os.path.join(path, self.PATH_PREFIX, name))
self._logger = logging.getLogger(__name__)
'Returns: The path of the revision file as string'
@property
def revision_file(self):
return os.path.join(self.path, 'revision')
'Checks if the vocabulary is compiled by checking if the revision file is readable. This method should be overridden by subclasses to check for class-specific additional files, too. Returns: True if the dictionary is compiled, else False'
@abstractproperty
def is_compiled(self):
return os.access(self.revision_file, os.R_OK)
'Reads the compiled revision from the revision file. Returns: the revision of this vocabulary (i.e. the string inside the revision file), or None if is_compiled is False'
@property
def compiled_revision(self):
if not self.is_compiled:
    return None
with open(self.revision_file, 'r') as f:
    revision = f.read().strip()
self._logger.debug("compiled_revision is '%s'", revision)
return revision
'Convenience method to check if this vocabulary exactly contains the phrases passed to this method. Arguments: phrases -- a list of phrases Returns: True if phrases exactly matches the phrases inside this vocabulary.'
def matches_phrases(self, phrases):
return (self.compiled_revision == self.phrases_to_revision(phrases))
'Compiles this vocabulary. If the force argument is True, compilation will be forced regardless of necessity (which means that the preliminary check if the current revision already equals the revision after compilation will be skipped). This method is not meant to be overridden by subclasses - use the _compile_vocabulary()-method instead. Arguments: phrases -- a list of phrases that this vocabulary will contain force -- (optional) forces compilation (Default: False) Returns: The revision of the compiled vocabulary'
def compile(self, phrases, force=False):
revision = self.phrases_to_revision(phrases)
if (not force) and (self.compiled_revision == revision):
    self._logger.debug('Compilation not necessary, compiled ' +
                       'version matches phrases.')
    return revision
if not os.path.exists(self.path):
    self._logger.debug("Vocabulary dir '%s' does not exist, " +
                       'creating...', self.path)
    try:
        os.makedirs(self.path)
    except OSError:
        self._logger.error("Couldn't create vocabulary dir '%s'",
                           self.path, exc_info=True)
        raise
try:
    with open(self.revision_file, 'w') as f:
        f.write(revision)
except (OSError, IOError):
    self._logger.error("Couldn't write revision file in '%s'",
                       self.revision_file, exc_info=True)
    raise
else:
    self._logger.info('Starting compilation...')
    try:
        self._compile_vocabulary(phrases)
    except Exception as e:
        self._logger.error('Fatal compilation error occurred, ' +
                           'cleaning up...', exc_info=True)
        try:
            os.remove(self.revision_file)
        except OSError:
            pass
        raise e
    else:
        self._logger.info('Compilation done.')
return revision
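A hedged usage sketch of the skip-if-unchanged behaviour; the concrete subclass and path are hypothetical stand-ins:

# Hypothetical: any concrete Vocabulary subclass behaves this way.
vocab = PocketsphinxVocabulary(name='default', path='/tmp/vocabularies')
rev1 = vocab.compile(['TIME', 'MUSIC'])
rev2 = vocab.compile(['TIME', 'MUSIC'])       # revision matches, returns early
assert rev1 == rev2
vocab.compile(['TIME', 'MUSIC'], force=True)  # recompiles regardless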
'Checks if the vocabulary is compiled by checking if the revision file is readable. Returns: True if this vocabulary has been compiled, else False'
@property
def is_compiled(self):
return super(self.__class__, self).is_compiled
'Does nothing (because this is a dummy class for testing purposes).'
def _compile_vocabulary(self, phrases):
pass
'Returns: The path of the pocketsphinx languagemodel file as string'
@property
def languagemodel_file(self):
return os.path.join(self.path, 'languagemodel')
'Returns: The path of the pocketsphinx dictionary file as string'
@property
def dictionary_file(self):
return os.path.join(self.path, 'dictionary')
'Checks if the vocabulary is compiled by checking if the revision, languagemodel and dictionary files are readable. Returns: True if this vocabulary has been compiled, else False'
@property
def is_compiled(self):
return (super(self.__class__, self).is_compiled and
        os.access(self.languagemodel_file, os.R_OK) and
        os.access(self.dictionary_file, os.R_OK))
'Convenience property to use this Vocabulary with the __init__() method of the pocketsphinx.Decoder class. Returns: A dict containing kwargs for the pocketsphinx.Decoder.__init__() method. Example: decoder = pocketsphinx.Decoder(**vocab_instance.decoder_kwargs, hmm=\'/path/to/hmm\')'
@property
def decoder_kwargs(self):
return {'lm': self.languagemodel_file, 'dict': self.dictionary_file}
'Compiles the vocabulary to the Pocketsphinx format by creating a languagemodel and a dictionary. Arguments: phrases -- a list of phrases that this vocabulary will contain'
def _compile_vocabulary(self, phrases):
text = ' '.join(['<s> %s </s>' % phrase for phrase in phrases])
self._logger.debug('Compiling languagemodel...')
vocabulary = self._compile_languagemodel(text, self.languagemodel_file)
self._logger.debug('Starting dictionary...')
self._compile_dictionary(vocabulary, self.dictionary_file)
'Compiles the languagemodel from a text. Arguments: text -- the text the languagemodel will be generated from output_file -- the path of the file this languagemodel will be written to Returns: A list of all unique words this vocabulary contains.'
def _compile_languagemodel(self, text, output_file):
with tempfile.NamedTemporaryFile(suffix='.vocab', delete=False) as f:
    vocab_file = f.name
self._logger.debug("Creating vocab file: '%s'", vocab_file)
cmuclmtk.text2vocab(text, vocab_file)
self._logger.debug("Creating languagemodel file: '%s'", output_file)
cmuclmtk.text2lm(text, output_file, vocab_file=vocab_file)
self._logger.debug('Getting words from vocab file and removing it ' +
                   'afterwards...')
words = []
with open(vocab_file, 'r') as f:
    for line in f:
        line = line.strip()
        if (not line.startswith('#')) and (line not in ('<s>', '</s>')):
            words.append(line)
os.remove(vocab_file)
return words
'Compiles the dictionary from a list of words. Arguments: words -- a list of all unique words this vocabulary contains output_file -- the path of the file this dictionary will be written to'
def _compile_dictionary(self, words, output_file):
self._logger.debug('Getting phonemes for %d words...', len(words))
g2pconverter = PhonetisaurusG2P(**PhonetisaurusG2P.get_config())
phonemes = g2pconverter.translate(words)
self._logger.debug("Creating dict file: '%s'", output_file)
with open(output_file, 'w') as f:
    for (word, pronounciations) in phonemes.items():
        for (i, pronounciation) in enumerate(pronounciations, start=1):
            # ' DCTB ' was an extraction artifact standing in for a tab
            if i == 1:
                line = '%s\t%s\n' % (word, pronounciation)
            else:
                line = '%s(%d)\t%s\n' % (word, i, pronounciation)
            f.write(line)
'Returns: The path of the julius dfa file as string'
@property
def dfa_file(self):
return os.path.join(self.path, 'dfa')
'Returns: The path of the julius dict file as string'
@property
def dict_file(self):
return os.path.join(self.path, 'dict')
'Prepare the client and music variables'
def __init__(self, server='localhost', port=6600):
self.server = server
self.port = port
self.client = mpd.MPDClient()
self.client.timeout = None
self.client.idletimeout = None
self.client.connect(self.server, self.port)
self.playlists = [x['playlist'] for x in self.client.listplaylists()]
self.client.clear()
for playlist in self.playlists:
    self.client.load(playlist)
self.songs = []
self.song_titles = []
self.song_artists = []
soup = self.client.playlist()
for i in range(0, len(soup) / 10):
    index = i * 10
    id = soup[index].strip()
    title = soup[index + 3].strip().upper()
    artist = soup[index + 2].strip().upper()
    album = soup[index + 4].strip().upper()
    self.songs.append(Song(id, title, artist, album))
    self.song_titles.append(title)
    self.song_artists.append(artist)
'Plays the current song or accepts a song to play. Arguments: songs -- a list of song objects playlist_name -- user-defined, something like "Love Song Playlist"'
@reconnect
def play(self, songs=False, playlist_name=False):
if songs:
    self.client.clear()
    for song in songs:
        try:
            self.client.add(song.id)
        except:
            pass
if playlist_name:
    self.client.clear()
    self.client.load(playlist_name)
self.client.play()
'Returns the list of unique words that comprise song and artist titles'
def get_soup(self):
soup = []
for song in self.songs:
    song_words = song.title.split(' ')
    artist_words = song.artist.split(' ')
    soup.extend(song_words)
    soup.extend(artist_words)
title_trans = ''.join(chr(c) if (chr(c).isupper() or chr(c).islower()) else '_'
                      for c in range(256))
soup = [x.decode('utf-8').encode('ascii', 'ignore').upper()
         .translate(title_trans).replace('_', '') for x in soup]
soup = [x for x in soup if x != '']
return list(set(soup))
'Returns the list of unique words that comprise playlist names'
def get_soup_playlist(self):
soup = []
for name in self.playlists:
    soup.extend(name.split(' '))
title_trans = ''.join(chr(c) if (chr(c).isupper() or chr(c).islower()) else '_'
                      for c in range(256))
soup = [x.decode('utf-8').encode('ascii', 'ignore').upper()
         .translate(title_trans).replace('_', '') for x in soup]
soup = [x for x in soup if x != '']
return list(set(soup))
'Returns the list of PHRASES that comprise song and artist titles'
def get_soup_separated(self):
title_soup = [song.title for song in self.songs]
artist_soup = [song.artist for song in self.songs]
soup = list(set(title_soup + artist_soup))
title_trans = ''.join(chr(c) if (chr(c).isupper() or chr(c).islower()) else '_'
                      for c in range(256))
soup = [x.decode('utf-8').encode('ascii', 'ignore').upper()
         .translate(title_trans).replace('_', ' ') for x in soup]
soup = [re.sub(' +', ' ', x) for x in soup if x != '']
return soup
'Returns the songs that best match the query on the title or artist field.'
def fuzzy_songs(self, query):
query = query.upper()
matched_song_titles = difflib.get_close_matches(query, self.song_titles)
matched_song_artists = difflib.get_close_matches(query, self.song_artists)
# if the query matches exactly, drop the merely-close matches
strict_priority_title = [x for x in matched_song_titles if x == query]
strict_priority_artists = [x for x in matched_song_artists if x == query]
if strict_priority_title:
    matched_song_titles = strict_priority_title
if strict_priority_artists:
    matched_song_artists = strict_priority_artists
matched_songs_bytitle = [song for song in self.songs
                         if song.title in matched_song_titles]
matched_songs_byartist = [song for song in self.songs
                          if song.artist in matched_song_artists]
matches = list(set(matched_songs_bytitle + matched_songs_byartist))
return matches
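The matching relies on difflib.get_close_matches, which returns the closest candidates best-first; a small hedged demo with made-up titles:

import difflib

song_titles = ['BOHEMIAN RHAPSODY', 'RADIO GA GA', 'UNDER PRESSURE']
print difflib.get_close_matches('RADIO GAGA', song_titles)
# -> ['RADIO GA GA']; an exact match would then be given strict priority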
'Returns the playlist names that best match the query.'
def fuzzy_playlists(self, query):
query = query.upper()
lookup = {n.upper(): n for n in self.playlists}
results = [lookup[r] for r in difflib.get_close_matches(query, lookup)]
return results
'Initiates the pocketsphinx instance. Arguments: speaker -- handles platform-independent audio output passive_stt_engine -- performs STT while Jasper is in passive listen mode active_stt_engine -- performs STT while Jasper is in active listen mode'
def __init__(self, speaker, passive_stt_engine, active_stt_engine):
self._logger = logging.getLogger(__name__)
self.speaker = speaker
self.passive_stt_engine = passive_stt_engine
self.active_stt_engine = active_stt_engine
self._logger.info('Initializing PyAudio. ALSA/Jack error messages ' +
                  'that pop up during this process are normal and ' +
                  'can usually be safely ignored.')
self._audio = pyaudio.PyAudio()
self._logger.info('Initialization of PyAudio completed.')
'Listens for PERSONA in everyday sound. Times out after LISTEN_TIME, so needs to be restarted.'
def passiveListen(self, PERSONA):
THRESHOLD_MULTIPLIER = 1.8
RATE = 16000
CHUNK = 1024
# number of seconds to sample to establish the threshold
THRESHOLD_TIME = 1
# number of seconds to listen before forcing a restart
LISTEN_TIME = 10
# prepare recording stream
stream = self._audio.open(format=pyaudio.paInt16,
                          channels=1,
                          rate=RATE,
                          input=True,
                          frames_per_buffer=CHUNK)
frames = []
# stores the last N score values
lastN = [i for i in range(30)]
# calculate the long-run average, and thereby the proper threshold
for i in range(0, (RATE / CHUNK) * THRESHOLD_TIME):
    data = stream.read(CHUNK)
    frames.append(data)
    lastN.pop(0)
    lastN.append(self.getScore(data))
    average = sum(lastN) / len(lastN)
THRESHOLD = average * THRESHOLD_MULTIPLIER
frames = []
didDetect = False
# start passively listening for a disturbance above the threshold
for i in range(0, (RATE / CHUNK) * LISTEN_TIME):
    data = stream.read(CHUNK)
    frames.append(data)
    score = self.getScore(data)
    if score > THRESHOLD:
        didDetect = True
        break
# no use continuing if no flag was raised
if not didDetect:
    print 'No disturbance detected'
    stream.stop_stream()
    stream.close()
    return (None, None)
# cut off any recording before this disturbance was detected
frames = frames[-20:]
# otherwise, keep recording for a few more seconds and save the file
DELAY_MULTIPLIER = 1
for i in range(0, (RATE / CHUNK) * DELAY_MULTIPLIER):
    data = stream.read(CHUNK)
    frames.append(data)
# save the audio data
stream.stop_stream()
stream.close()
with tempfile.NamedTemporaryFile(mode='w+b') as f:
    wav_fp = wave.open(f, 'wb')
    wav_fp.setnchannels(1)
    wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
    wav_fp.setframerate(RATE)
    wav_fp.writeframes(''.join(frames))
    wav_fp.close()
    f.seek(0)
    # check if PERSONA was said
    transcribed = self.passive_stt_engine.transcribe(f)
if any((PERSONA in phrase) for phrase in transcribed):
    return (THRESHOLD, PERSONA)
return (False, transcribed)
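The disturbance threshold above is just a rolling average scaled by a constant; a hedged sketch of the arithmetic with made-up chunk scores:

THRESHOLD_MULTIPLIER = 1.8
lastN = [520, 610, 580, 495, 605]           # hypothetical getScore() values
average = sum(lastN) / len(lastN)           # 562 (integer division, Python 2)
THRESHOLD = average * THRESHOLD_MULTIPLIER  # 1011.6
print average, THRESHOLD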
'Records until a second of silence or times out after 12 seconds. Returns the first matching string or None.'
def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
options = self.activeListenToAllOptions(THRESHOLD, LISTEN, MUSIC)
if options:
    return options[0]
'Records until a second of silence or times out after 12 seconds. Returns a list of the matching options or None.'
def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
RATE = 16000
CHUNK = 1024
LISTEN_TIME = 12
# check if no threshold was provided
if THRESHOLD is None:
    THRESHOLD = self.fetchThreshold()
self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))
# prepare recording stream
stream = self._audio.open(format=pyaudio.paInt16,
                          channels=1,
                          rate=RATE,
                          input=True,
                          frames_per_buffer=CHUNK)
frames = []
lastN = [THRESHOLD * 1.2 for i in range(30)]
for i in range(0, (RATE / CHUNK) * LISTEN_TIME):
    data = stream.read(CHUNK)
    frames.append(data)
    score = self.getScore(data)
    lastN.pop(0)
    lastN.append(score)
    average = sum(lastN) / float(len(lastN))
    # stop recording once the rolling average drops below the threshold
    if average < (THRESHOLD * 0.8):
        break
self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
# save the audio data
stream.stop_stream()
stream.close()
with tempfile.SpooledTemporaryFile(mode='w+b') as f:
    wav_fp = wave.open(f, 'wb')
    wav_fp.setnchannels(1)
    wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
    wav_fp.setframerate(RATE)
    wav_fp.writeframes(''.join(frames))
    wav_fp.close()
    f.seek(0)
    return self.active_stt_engine.transcribe(f)
'Delegates user input to the handling function when activated.'
def handleForever(self):
self._logger.info("Starting to handle conversation with keyword '%s'.", self.persona) while True: notifications = self.notifier.getAllNotifications() for notif in notifications: self._logger.info("Received notification: '%s'", str(notif)) self._logger.debug("Started listening for keyword '%s'", self.persona) (threshold, transcribed) = self.mic.passiveListen(self.persona) self._logger.debug("Stopped listening for keyword '%s'", self.persona) if ((not transcribed) or (not threshold)): self._logger.info('Nothing has been said or transcribed.') continue self._logger.info("Keyword '%s' has been said!", self.persona) self._logger.debug('Started to listen actively with threshold: %r', threshold) input = self.mic.activeListenToAllOptions(threshold) self._logger.debug('Stopped to listen actively with threshold: %r', threshold) if input: self.brain.query(input) else: self.mic.say('Pardon?')
'Instantiates a new Brain object, which cross-references user input with a list of modules. Note that the order of brain.modules matters, as the Brain will cease execution on the first module that accepts a given input. Arguments: mic -- used to interact with the user (for both input and output) profile -- contains information related to the user (e.g., phone number)'
def __init__(self, mic, profile):
self.mic = mic
self.profile = profile
self.modules = self.get_modules()
self._logger = logging.getLogger(__name__)
'Dynamically loads all the modules in the modules folder and sorts them by the PRIORITY key. If no PRIORITY is defined for a given module, a priority of 0 is assumed.'
@classmethod
def get_modules(cls):
logger = logging.getLogger(__name__)
locations = [jasperpath.PLUGIN_PATH]
logger.debug('Looking for modules in: %s',
             ', '.join(["'%s'" % location for location in locations]))
modules = []
for (finder, name, ispkg) in pkgutil.walk_packages(locations):
    try:
        loader = finder.find_module(name)
        mod = loader.load_module(name)
    except:
        logger.warning("Skipped module '%s' due to an error.", name,
                       exc_info=True)
    else:
        if hasattr(mod, 'WORDS'):
            logger.debug("Found module '%s' with words: %r", name,
                         mod.WORDS)
            modules.append(mod)
        else:
            logger.warning("Skipped module '%s' because it misses " +
                           'the WORDS constant.', name)
modules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY') else 0,
             reverse=True)
return modules
'Passes user input to the appropriate module, testing it against each candidate module\'s isValid function. Arguments: texts -- user input, typically speech, to be parsed by a module'
def query(self, texts):
for module in self.modules:
    for text in texts:
        if module.isValid(text):
            self._logger.debug("'%s' is a valid phrase for module '%s'",
                               text, module.__name__)
            try:
                module.handle(text, self.mic, self.profile)
            except Exception:
                self._logger.error('Failed to execute module',
                                   exc_info=True)
                self.mic.say("I'm sorry. I had some trouble with " +
                             'that operation. Please try again later.')
            else:
                self._logger.debug("Handling of phrase '%s' by " +
                                   "module '%s' completed", text,
                                   module.__name__)
            finally:
                return
self._logger.debug('No module was able to handle any of these ' +
                   'phrases: %r', texts)
'Initiates the pocketsphinx instance. Arguments: vocabulary -- a PocketsphinxVocabulary instance hmm_dir -- the path of the Hidden Markov Model (HMM)'
def __init__(self, vocabulary, hmm_dir='/usr/local/share/pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k'):
self._logger = logging.getLogger(__name__)
# quirky bug where the first import sometimes doesn't work
try:
    import pocketsphinx as ps
except:
    import pocketsphinx as ps
with tempfile.NamedTemporaryFile(prefix='psdecoder_', suffix='.log',
                                 delete=False) as f:
    self._logfile = f.name
self._logger.debug("Initializing PocketSphinx Decoder with hmm_dir '%s'",
                   hmm_dir)
if not os.path.exists(hmm_dir):
    msg = ("hmm_dir '%s' does not exist! Please make sure that you " +
           'have set the correct hmm_dir in your profile.') % hmm_dir
    self._logger.error(msg)
    raise RuntimeError(msg)
missing_hmm_files = []
for fname in ('mdef', 'feat.params', 'means', 'noisedict',
              'transition_matrices', 'variances'):
    if not os.path.exists(os.path.join(hmm_dir, fname)):
        missing_hmm_files.append(fname)
mixweights = os.path.exists(os.path.join(hmm_dir, 'mixture_weights'))
sendump = os.path.exists(os.path.join(hmm_dir, 'sendump'))
if (not mixweights) and (not sendump):
    # pocketsphinx needs either mixture_weights or sendump
    missing_hmm_files.append('mixture_weights or sendump')
if missing_hmm_files:
    self._logger.warning("hmm_dir '%s' is missing files: %s. Please " +
                         'make sure that you have set the correct ' +
                         'hmm_dir in your profile.',
                         hmm_dir, ', '.join(missing_hmm_files))
self._decoder = ps.Decoder(hmm=hmm_dir, logfn=self._logfile,
                           **vocabulary.decoder_kwargs)
'Performs STT, transcribing an audio file and returning the result. Arguments: fp -- a file object containing audio data'
def transcribe(self, fp):
fp.seek(44)  # skip the 44-byte RIFF/WAVE header, leaving raw PCM
data = fp.read()
self._decoder.start_utt()
self._decoder.process_raw(data, False, True)
self._decoder.end_utt()
result = self._decoder.get_hyp()
with open(self._logfile, 'r+') as f:
    for line in f:
        self._logger.debug(line.strip())
    f.truncate()
transcribed = [result[0]]
self._logger.info('Transcribed: %r', transcribed)
return transcribed
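The fp.seek(44) above skips a canonical 44-byte RIFF/WAVE header so the decoder receives bare PCM; for a plain PCM WAV the same bytes can be obtained with the wave module (file name is hypothetical):

import wave

w = wave.open('clip.wav', 'rb')
pcm = w.readframes(w.getnframes())  # roughly fp.read() after fp.seek(44)
w.close()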
'Arguments: api_key - the public api key which allows access to Google APIs'
def __init__(self, api_key=None, language='en-us'):
self._logger = logging.getLogger(__name__)
self._request_url = None
self._language = None
self._api_key = None
self._http = requests.Session()
self.language = language
self.api_key = api_key
'Performs STT via the Google Speech API, transcribing an audio file and returning an English string. Arguments: fp -- a file object containing the .wav audio data to be transcribed'
def transcribe(self, fp):
if not self.api_key:
    self._logger.critical('API key missing, transcription request ' +
                          'aborted.')
    return []
elif not self.language:
    self._logger.critical('Language info missing, transcription ' +
                          'request aborted.')
    return []
wav = wave.open(fp, 'rb')
frame_rate = wav.getframerate()
wav.close()
data = fp.read()
headers = {'content-type': 'audio/l16; rate=%s' % frame_rate}
r = self._http.post(self.request_url, data=data, headers=headers)
try:
    r.raise_for_status()
except requests.exceptions.HTTPError:
    self._logger.critical('Request failed with http status %d',
                          r.status_code)
    if r.status_code == requests.codes['forbidden']:
        self._logger.warning('Status 403 is probably caused by an ' +
                             'invalid Google API key.')
    return []
r.encoding = 'utf-8'
try:
    # the response may contain several JSON objects separated by
    # newlines; only the last one is wanted here
    response = json.loads(list(r.text.strip().split('\n', 1))[-1])
    if len(response['result']) == 0:
        raise ValueError('Nothing has been transcribed.')
    results = [alt['transcript'] for alt
               in response['result'][0]['alternative']]
except ValueError as e:
    self._logger.warning('Empty response: %s', e.args[0])
    results = []
except (KeyError, IndexError):
    self._logger.warning('Cannot parse response.', exc_info=True)
    results = []
else:
    # convert all results to uppercase
    results = tuple(result.upper() for result in results)
    self._logger.info('Transcribed: %r', results)
return results
'Does Jasper recognize his name (i.e., passive listen)?'
def testTranscribeJasper(self):
with open(self.jasper_clip, mode='rb') as f:
    transcription = self.passive_stt_engine.transcribe(f)
self.assertIn('JASPER', transcription)
'Does Jasper recognize \'time\' (i.e., active listen)?'
def testTranscribe(self):
with open(self.time_clip, mode='rb') as f:
    transcription = self.active_stt_engine.transcribe(f)
self.assertIn('TIME', transcription)
'Does Brain correctly log errors when raised by modules?'
def testLog(self):
my_brain = TestBrain._emptyBrain()
unclear = my_brain.modules[-1]
with mock.patch.object(unclear, 'handle') as mocked_handle:
    with mock.patch.object(my_brain._logger, 'error') as mocked_log:
        mocked_handle.side_effect = KeyError('foo')
        my_brain.query('zzz gibberish zzz')
        self.assertTrue(mocked_log.called)
'Does Brain sort modules by priority?'
def testSortByPriority(self):
my_brain = TestBrain._emptyBrain()
priorities = filter(lambda m: hasattr(m, 'PRIORITY'), my_brain.modules)
target = sorted(priorities, key=lambda m: m.PRIORITY, reverse=True)
self.assertEqual(target, priorities)
'Does Brain correctly send query to higher-priority module?'
def testPriority(self):
my_brain = TestBrain._emptyBrain()
hn_module = 'HN'
hn = filter(lambda m: m.__name__ == hn_module, my_brain.modules)[0]
with mock.patch.object(hn, 'handle') as mocked_handle:
    my_brain.query(['hacker news'])
    self.assertTrue(mocked_handle.called)
'Generic method for spoofing conversation. Arguments: query -- The initial input to the server. inputs -- Additional input, if conversation is extended. Returns: The server\'s responses, in a list.'
def runConversation(self, query, inputs, module):
self.assertTrue(module.isValid(query))
mic = test_mic.Mic(inputs)
module.handle(query, mic, self.profile)
return mic.outputs
'The lines below are a spider contract. For more info see: http://doc.scrapy.org/en/latest/topics/contracts.html @url http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/ @scrapes name'
def parse(self, response):
sites = response.css('#site-list-content > div.site-item > div.title-and-desc')
items = []
for site in sites:
    item = Website()
    item['name'] = site.css('a > div.site-title::text').extract_first().strip()
    item['url'] = site.xpath('a/@href').extract_first().strip()
    item['description'] = site.css('div.site-descr::text').extract_first().strip()
    items.append(item)
return items
'Since the original function always creates the directory, to resolve the issue a new function had to be created. It\'s a simple copy and was reduced for this case. More info at: https://github.com/scrapy/scrapy/pull/2005'
def _copytree(self, src, dst):
ignore = IGNORE
names = os.listdir(src)
ignored_names = ignore(src, names)
if not os.path.exists(dst):
    os.makedirs(dst)
for name in names:
    if name in ignored_names:
        continue
    srcname = os.path.join(src, name)
    dstname = os.path.join(dst, name)
    if os.path.isdir(srcname):
        self._copytree(srcname, dstname)
    else:
        copy2(srcname, dstname)
copystat(src, dst)
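For contrast, the stock shutil.copytree always creates dst and fails if it already exists, which is exactly the case this copy works around; a hedged sketch (paths are hypothetical):

import os, shutil

src, dst = 'project_template', 'existing_project'  # hypothetical paths
if not os.path.isdir(dst):
    shutil.copytree(src, dst)   # fine: dst does not exist yet
# calling shutil.copytree(src, dst) again would raise OSError, since dst
# now exists; _copytree() above merges into the existing directory instead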
'Command syntax (preferably one-line). Do not include command name.'
def syntax(self):
return ''
'A short description of the command'
def short_desc(self):
return ''
'A long description of the command. Return short description when not available. It cannot contain newlines, since contents will be formatted by optparse, which removes newlines and wraps text.'
def long_desc(self):
return self.short_desc()
'An extensive help for the command. It will be shown when using the "help" command. It can contain newlines, since no post-formatting will be applied to its contents.'
def help(self):
return self.long_desc()
'Populate the option parser with options available for this command'
def add_options(self, parser):
group = OptionGroup(parser, 'Global Options')
group.add_option('--logfile', metavar='FILE',
                 help='log file. if omitted stderr will be used')
group.add_option('-L', '--loglevel', metavar='LEVEL', default=None,
                 help='log level (default: %s)' % self.settings['LOG_LEVEL'])
group.add_option('--nolog', action='store_true',
                 help='disable logging completely')
group.add_option('--profile', metavar='FILE', default=None,
                 help='write python cProfile stats to FILE')
group.add_option('--pidfile', metavar='FILE',
                 help='write process ID to FILE')
group.add_option('-s', '--set', action='append', default=[],
                 metavar='NAME=VALUE',
                 help='set/override setting (may be repeated)')
group.add_option('--pdb', action='store_true',
                 help='enable pdb on failure')
parser.add_option_group(group)
'Entry point for running commands'
def run(self, args, opts):
raise NotImplementedError
'Generate the spider module, based on the given template'
def _genspider(self, module, name, domain, template_name, template_file):
tvars = {
    'project_name': self.settings.get('BOT_NAME'),
    'ProjectName': string_camelcase(self.settings.get('BOT_NAME')),
    'module': module,
    'name': name,
    'domain': domain,
    'classname': '%sSpider' % ''.join(s.capitalize()
                                      for s in module.split('_')),
}
if self.settings.get('NEWSPIDER_MODULE'):
    spiders_module = import_module(self.settings['NEWSPIDER_MODULE'])
    spiders_dir = abspath(dirname(spiders_module.__file__))
else:
    spiders_module = None
    spiders_dir = '.'
spider_file = '%s.py' % join(spiders_dir, module)
shutil.copyfile(template_file, spider_file)
render_templatefile(spider_file, **tvars)
print('Created spider %r using template %r ' % (name, template_name),
      end='' if spiders_module else '\n')
if spiders_module:
    print('in module:\n  %s.%s' % (spiders_module.__name__, module))
'You can use this function to update the Scrapy objects that will be available in the shell'
def update_vars(self, vars):
pass
'Start the execution engine'
@defer.inlineCallbacks
def start(self):
assert not self.running, 'Engine already running'
self.start_time = time()
yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
self.running = True
self._closewait = defer.Deferred()
yield self._closewait
'Stop the execution engine gracefully'
def stop(self):
assert self.running, 'Engine not running'
self.running = False
dfd = self._close_all_spiders()
return dfd.addBoth(lambda _: self._finish_stopping_engine())
'Close the execution engine gracefully. If it has already been started, stop it. In all cases, close all spiders and the downloader.'
def close(self):
if self.running:
    # will also close spiders and downloader
    return self.stop()
elif self.open_spiders:
    # will also close downloader
    return self._close_all_spiders()
else:
    return defer.succeed(self.downloader.close())
'Pause the execution engine'
def pause(self):
self.paused = True
'Resume the execution engine'
def unpause(self):
self.paused = False
'Does the engine have capacity to handle more spiders?'
def has_capacity(self):
return (not bool(self.slot))
'Called when a spider gets idle. This function is called when there are no remaining pages to download or schedule. It can be called multiple times. If some extension raises a DontCloseSpider exception (in the spider_idle signal handler) the spider is not closed until the next loop and this function is guaranteed to be called (at least) once again for this spider.'
def _spider_idle(self, spider):
res = self.signals.send_catch_log(signal=signals.spider_idle,
                                  spider=spider,
                                  dont_log=DontCloseSpider)
if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider)
       for (_, x) in res):
    return
if self.spider_is_idle(spider):
    self.close_spider(spider, reason='finished')
'Close (cancel) spider and clear all its outstanding requests'
def close_spider(self, spider, reason='cancelled'):
slot = self.slot
if slot.closing:
    return slot.closing
logger.info('Closing spider (%(reason)s)',
            {'reason': reason},
            extra={'spider': spider})
dfd = slot.close()

def log_failure(msg):
    def errback(failure):
        logger.error(msg, exc_info=failure_to_exc_info(failure),
                     extra={'spider': spider})
    return errback

dfd.addBoth(lambda _: self.downloader.close())
dfd.addErrback(log_failure('Downloader close failure'))
dfd.addBoth(lambda _: self.scraper.close_spider(spider))
dfd.addErrback(log_failure('Scraper close failure'))
dfd.addBoth(lambda _: slot.scheduler.close(reason))
dfd.addErrback(log_failure('Scheduler close failure'))
dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
    signal=signals.spider_closed, spider=spider, reason=reason))
dfd.addErrback(log_failure('Error while sending spider_close signal'))
dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
dfd.addErrback(log_failure('Stats close failure'))
dfd.addBoth(lambda _: logger.info('Spider closed (%(reason)s)',
                                  {'reason': reason},
                                  extra={'spider': spider}))
dfd.addBoth(lambda _: setattr(self, 'slot', None))
dfd.addErrback(log_failure('Error while unassigning slot'))
dfd.addBoth(lambda _: setattr(self, 'spider', None))
dfd.addErrback(log_failure('Error while unassigning spider'))
dfd.addBoth(lambda _: self._spider_closed_callback(spider))
return dfd
'Open the given spider for scraping and allocate resources for it'
@defer.inlineCallbacks
def open_spider(self, spider):
self.slot = Slot()
yield self.itemproc.open_spider(spider)
'Close a spider being scraped and release its resources'
def close_spider(self, spider):
slot = self.slot
slot.closing = defer.Deferred()
slot.closing.addCallback(self.itemproc.close_spider)
self._check_if_closing(spider, slot)
return slot.closing
'Return True if there aren\'t any more spiders to process'
def is_idle(self):
return (not self.slot)
'Handle the downloaded response or failure through the spider callback/errback'
def _scrape(self, response, request, spider):
assert isinstance(response, (Response, Failure))
dfd = self._scrape2(response, request, spider)
dfd.addErrback(self.handle_spider_error, request, response, spider)
dfd.addCallback(self.handle_spider_output, request, response, spider)
return dfd
'Handle the different cases of request\'s result been a Response or a Failure'
def _scrape2(self, request_result, request, spider):
if not isinstance(request_result, Failure):
    return self.spidermw.scrape_response(self.call_spider, request_result, request, spider)
else:
    dfd = self.call_spider(request_result, request, spider)
    return dfd.addErrback(self._log_download_errors, request_result, request, spider)
'Process each Request/Item (given in the output parameter) returned from the given spider'
def _process_spidermw_output(self, output, request, response, spider):
if isinstance(output, Request):
    self.crawler.engine.crawl(request=output, spider=spider)
elif isinstance(output, (BaseItem, dict)):
    self.slot.itemproc_size += 1
    dfd = self.itemproc.process_item(output, spider)
    dfd.addBoth(self._itemproc_finished, output, response, spider)
    return dfd
elif output is None:
    pass
else:
    typename = type(output).__name__
    logger.error('Spider must return Request, BaseItem, dict or None, got %(typename)r in %(request)s',
                 {'request': request, 'typename': typename}, extra={'spider': spider})
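A minimal spider sketch that exercises each branch above: a yielded Request is fed back to the engine, a dict enters the item pipeline, and None is silently ignored.

import scrapy

class ExampleSpider(scrapy.Spider):
    name = 'example'
    start_urls = ['http://example.com']

    def parse(self, response):
        yield {'url': response.url}                  # dict -> itemproc.process_item()
        yield scrapy.Request('http://example.com/next',
                             callback=self.parse)    # Request -> engine.crawl()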
'Log and silence errors that come from the engine (typically download errors that got propagated through here)'
def _log_download_errors(self, spider_failure, download_failure, request, spider):
if isinstance(download_failure, Failure) and not download_failure.check(IgnoreRequest):
    if download_failure.frames:
        logger.error('Error downloading %(request)s', {'request': request},
                     exc_info=failure_to_exc_info(download_failure), extra={'spider': spider})
    else:
        errmsg = download_failure.getErrorMessage()
        if errmsg:
            logger.error('Error downloading %(request)s: %(errmsg)s',
                         {'request': request, 'errmsg': errmsg}, extra={'spider': spider})
if spider_failure is not download_failure:
    return spider_failure
'ItemProcessor finished for the given ``item`` and returned ``output``'
def _itemproc_finished(self, output, item, response, spider):
self.slot.itemproc_size -= 1
if isinstance(output, Failure):
    ex = output.value
    if isinstance(ex, DropItem):
        logkws = self.logformatter.dropped(item, ex, response, spider)
        logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
        return self.signals.send_catch_log_deferred(signal=signals.item_dropped, item=item,
                                                    response=response, spider=spider, exception=output.value)
    else:
        logger.error('Error processing %(item)s', {'item': item},
                     exc_info=failure_to_exc_info(output), extra={'spider': spider})
else:
    logkws = self.logformatter.scraped(output, response, spider)
    logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
    return self.signals.send_catch_log_deferred(signal=signals.item_scraped, item=output,
                                                response=response, spider=spider)
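For context, a minimal item-pipeline sketch showing where the Failure handled above comes from: raising DropItem routes the item to the item_dropped branch, while returning it routes it to item_scraped.

from scrapy.exceptions import DropItem

class RequirePricePipeline(object):

    def process_item(self, item, spider):
        if not item.get('price'):
            raise DropItem('Missing price in %r' % item)   # -> item_dropped signal
        return item                                        # -> item_scraped signal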
'Lazy-load the download handler for a scheme, only on the first request for that scheme.'
def _get_handler(self, scheme):
if scheme in self._handlers:
    return self._handlers[scheme]
if scheme in self._notconfigured:
    return None
if scheme not in self._schemes:
    self._notconfigured[scheme] = 'no handler available for that scheme'
    return None
path = self._schemes[scheme]
try:
    dhcls = load_object(path)
    dh = dhcls(self._crawler.settings)
except NotConfigured as ex:
    self._notconfigured[scheme] = str(ex)
    return None
except Exception as ex:
    logger.error('Loading "%(clspath)s" for scheme "%(scheme)s"',
                 {'clspath': path, 'scheme': scheme}, exc_info=True, extra={'crawler': self._crawler})
    self._notconfigured[scheme] = str(ex)
    return None
else:
    self._handlers[scheme] = dh
    return self._handlers[scheme]
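The scheme table consulted above is built from Scrapy's DOWNLOAD_HANDLERS setting; a hedged settings sketch (the custom handler path is hypothetical):

# settings.py
DOWNLOAD_HANDLERS = {
    'ftp': 'myproject.handlers.CustomFTPHandler',  # hypothetical custom handler
    's3': None,                                    # disable a scheme entirely
}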
'Return a deferred for the HTTP download'
def download_request(self, request, spider):
agent = ScrapyAgent(contextFactory=self._contextFactory, pool=self._pool,
                    maxsize=getattr(spider, 'download_maxsize', self._default_maxsize),
                    warnsize=getattr(spider, 'download_warnsize', self._default_warnsize),
                    fail_on_dataloss=self._fail_on_dataloss)
return agent.download_request(request)
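Because maxsize and warnsize are read with getattr() and fall back to crawler-wide defaults, a spider can override them per class; a short sketch:

import scrapy

class BigFileSpider(scrapy.Spider):
    name = 'bigfiles'
    download_maxsize = 0             # 0 disables the hard size limit
    download_warnsize = 33554432     # log a warning above 32 MiB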
'Asks the proxy to open a tunnel.'
def requestTunnel(self, protocol):
tunnelReq = tunnel_request_data(self._tunneledHost, self._tunneledPort, self._proxyAuthHeader)
protocol.transport.write(tunnelReq)
self._protocolDataReceived = protocol.dataReceived
protocol.dataReceived = self.processProxyResponse
self._protocol = protocol
return protocol
'Processes the response from the proxy. If the tunnel is successfully created, notifies the client that we are ready to send requests; otherwise fails the tunnel Deferred with a TunnelError.'
def processProxyResponse(self, rcvd_bytes):
self._connectBuffer += rcvd_bytes
if '\r\n\r\n' not in self._connectBuffer:
    # wait until the proxy's response headers have arrived in full
    return
self._protocol.dataReceived = self._protocolDataReceived
respm = TunnelingTCP4ClientEndpoint._responseMatcher.match(self._connectBuffer)
if respm and int(respm.group('status')) == 200:
    try:
        # validate the certificate/SNI against the tunneled host, not the proxy
        sslOptions = self._contextFactory.creatorForNetloc(self._tunneledHost, self._tunneledPort)
    except AttributeError:
        sslOptions = self._contextFactory
    self._protocol.transport.startTLS(sslOptions, self._protocolFactory)
    self._tunnelReadyDeferred.callback(self._protocol)
else:
    if respm:
        extra = {'status': int(respm.group('status')), 'reason': respm.group('reason').strip()}
    else:
        extra = rcvd_bytes[:32]
    self._tunnelReadyDeferred.errback(TunnelError('Could not open CONNECT tunnel with proxy %s:%s [%r]'
                                                  % (self._host, self._port, extra)))
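For reference, a sketch of what tunnel_request_data() (used in requestTunnel above) is assumed to produce: a raw HTTP CONNECT request whose status-line reply is what processProxyResponse() matches.

def tunnel_request_data(host, port, proxy_auth_header=None):
    # Assumed shape of the helper; the real implementation may differ.
    tunnel_req = 'CONNECT %s:%s HTTP/1.1\r\n' % (host, port)
    tunnel_req += 'Host: %s:%s\r\n' % (host, port)
    if proxy_auth_header:
        tunnel_req += 'Proxy-Authorization: %s\r\n' % proxy_auth_header
    return tunnel_req + '\r\n'

# tunnel_request_data('example.com', 443) ->
# 'CONNECT example.com:443 HTTP/1.1\r\nHost: example.com:443\r\n\r\n'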
'Propagates the errback to the appropriate deferred.'
def connectFailed(self, reason):
self._tunnelReadyDeferred.errback(reason)
'Return a deferred for the HTTP download'
def download_request(self, request, spider):
factory = self.HTTPClientFactory(request)
self._connect(factory)
return factory.deferred
'Upload file to S3 storage'
def persist_file(self, path, buf, info, meta=None, headers=None):
key_name = '%s%s' % (self.prefix, path)
buf.seek(0)
if self.is_botocore:
    extra = self._headers_to_botocore_kwargs(self.HEADERS)
    if headers:
        extra.update(self._headers_to_botocore_kwargs(headers))
    return threads.deferToThread(
        self.s3_client.put_object,
        Bucket=self.bucket, Key=key_name, Body=buf,
        Metadata={k: str(v) for (k, v) in six.iteritems(meta or {})},
        ACL=self.POLICY, **extra)
else:
    b = self._get_boto_bucket()
    k = b.new_key(key_name)
    if meta:
        for (metakey, metavalue) in six.iteritems(meta):
            k.set_metadata(metakey, str(metavalue))
    h = self.HEADERS.copy()
    if headers:
        h.update(headers)
    return threads.deferToThread(k.set_contents_from_string, buf.getvalue(),
                                 headers=h, policy=self.POLICY)
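A hedged usage sketch: persist_file() only needs a seekable file-like buffer, so a caller might pass an in-memory BytesIO (`store` and the metadata values are illustrative):

from io import BytesIO

buf = BytesIO(b'{"title": "example"}')
store.persist_file('exports/item1.json', buf, info=None,
                   meta={'source': 'crawl-42'},                   # stored as S3 object metadata
                   headers={'Content-Type': 'application/json'})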
'Convert headers to botocore keyword arguments.'
def _headers_to_botocore_kwargs(self, headers):
mapping = CaselessDict({
    'Content-Type': 'ContentType',
    'Cache-Control': 'CacheControl',
    'Content-Disposition': 'ContentDisposition',
    'Content-Encoding': 'ContentEncoding',
    'Content-Language': 'ContentLanguage',
    'Content-Length': 'ContentLength',
    'Content-MD5': 'ContentMD5',
    'Expires': 'Expires',
    'X-Amz-Grant-Full-Control': 'GrantFullControl',
    'X-Amz-Grant-Read': 'GrantRead',
    'X-Amz-Grant-Read-ACP': 'GrantReadACP',
    'X-Amz-Grant-Write-ACP': 'GrantWriteACP',
})
extra = {}
for (key, value) in six.iteritems(headers):
    try:
        kwarg = mapping[key]
    except KeyError:
        raise TypeError('Header "%s" is not supported by botocore' % key)
    else:
        extra[kwarg] = value
return extra
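A quick sketch of the conversion (and of the TypeError branch) under the mapping above:

extra = store._headers_to_botocore_kwargs({
    'Content-Type': 'application/json',
    'Cache-Control': 'max-age=3600',
})
# extra == {'ContentType': 'application/json', 'CacheControl': 'max-age=3600'}

# An unmapped header raises:
# store._headers_to_botocore_kwargs({'X-Custom': '1'})  -> TypeError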

HF version of Edinburgh-NLP's Code docstrings corpus
