Dataset columns:
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 classes)
  text              string (75-19.8k chars)
  language          string (1 class)
  meta_information  dict
q280500
HasTraits.trait_metadata
test
def trait_metadata(self, traitname, key):
    """Get metadata values for trait by key."""
    try:
        trait = getattr(self.__class__, traitname)
    except AttributeError:
        raise TraitError("Class %s does not have a trait named %s" %
                         (self.__class__.__name__, traitname))
    else:
        return trait.get_metadata(key)
python
{ "resource": "" }
q280501
Type.validate
test
def validate(self, obj, value):
    """Validates that the value is a valid object instance."""
    try:
        if issubclass(value, self.klass):
            return value
    except:
        if (value is None) and (self._allow_none):
            return value
    self.error(obj, value)
python
{ "resource": "" }
q280502
Instance.get_default_value
test
def get_default_value(self):
    """Instantiate a default value instance.

    This is called when the containing HasTraits classes'
    :meth:`__new__` method is called to ensure that a unique instance
    is created for each HasTraits instance.
    """
    dv = self.default_value
    if isinstance(dv, DefaultValueGenerator):
        return dv.generate(self.klass)
    else:
        return dv
python
{ "resource": "" }
q280503
Dependency.check
test
def check(self, completed, failed=None):
    """check whether our dependencies have been met."""
    if len(self) == 0:
        return True
    against = set()
    if self.success:
        against = completed
    if failed is not None and self.failure:
        against = against.union(failed)
    if self.all:
        return self.issubset(against)
    else:
        return not self.isdisjoint(against)
python
{ "resource": "" }
q280504
Dependency.unreachable
test
def unreachable(self, completed, failed=None):
    """return whether this dependency has become impossible."""
    if len(self) == 0:
        return False
    against = set()
    if not self.success:
        against = completed
    if failed is not None and not self.failure:
        against = against.union(failed)
    if self.all:
        return not self.isdisjoint(against)
    else:
        return self.issubset(against)
python
{ "resource": "" }
q280505
Dependency.as_dict
test
def as_dict(self):
    """Represent this dependency as a dict. For json compatibility."""
    return dict(
        dependencies=list(self),
        all=self.all,
        success=self.success,
        failure=self.failure
    )
python
{ "resource": "" }
q280506
depth
test
def depth(n, tree):
    """get depth of an element in the tree"""
    d = 0
    parent = tree[n]
    while parent is not None:
        d += 1
        parent = tree[parent]
    return d
python
{ "resource": "" }
q280507
print_bintree
test
def print_bintree(tree, indent=' '):
    """print a binary tree"""
    for n in sorted(tree.keys()):
        print "%s%s" % (indent * depth(n, tree), n)
python
{ "resource": "" }
q280508
disambiguate_dns_url
test
def disambiguate_dns_url(url, location):
    """accept either IP address or dns name, and return IP"""
    if not ip_pat.match(location):
        location = socket.gethostbyname(location)
    return disambiguate_url(url, location)
python
{ "resource": "" }
q280509
BinaryTreeCommunicator.allreduce
test
def allreduce(self, f, value, flat=True):
    """parallel reduce followed by broadcast of the result"""
    return self.reduce(f, value, flat=flat, all=True)
python
{ "resource": "" }
q280510
Hub._validate_targets
test
def _validate_targets(self, targets):
    """turn any valid targets argument into a list of integer ids"""
    if targets is None:
        # default to all
        return self.ids

    if isinstance(targets, (int, str, unicode)):
        # only one target specified
        targets = [targets]
    _targets = []
    for t in targets:
        # map raw identities to ids
        if isinstance(t, (str, unicode)):
            t = self.by_ident.get(cast_bytes(t), t)
        _targets.append(t)
    targets = _targets

    bad_targets = [t for t in targets if t not in self.ids]
    if bad_targets:
        raise IndexError("No Such Engine: %r" % bad_targets)
    if not targets:
        raise IndexError("No Engines Registered")
    return targets
python
{ "resource": "" }
q280511
Hub.dispatch_monitor_traffic
test
def dispatch_monitor_traffic(self, msg):
    """all ME and Task queue messages come through here, as well as
    IOPub traffic."""
    self.log.debug("monitor traffic: %r", msg[0])
    switch = msg[0]
    try:
        idents, msg = self.session.feed_identities(msg[1:])
    except ValueError:
        idents = []
    if not idents:
        self.log.error("Monitor message without topic: %r", msg)
        return
    handler = self.monitor_handlers.get(switch, None)
    if handler is not None:
        handler(idents, msg)
    else:
        self.log.error("Unrecognized monitor topic: %r", switch)
python
{ "resource": "" }
q280512
Hub.dispatch_query
test
def dispatch_query(self, msg):
    """Route registration requests and queries from clients."""
    try:
        idents, msg = self.session.feed_identities(msg)
    except ValueError:
        idents = []
    if not idents:
        self.log.error("Bad Query Message: %r", msg)
        return
    client_id = idents[0]
    try:
        msg = self.session.unserialize(msg, content=True)
    except Exception:
        content = error.wrap_exception()
        self.log.error("Bad Query Message: %r", msg, exc_info=True)
        self.session.send(self.query, "hub_error", ident=client_id,
                          content=content)
        return
    # print client_id, header, parent, content
    # switch on message type:
    msg_type = msg['header']['msg_type']
    self.log.info("client::client %r requested %r", client_id, msg_type)
    handler = self.query_handlers.get(msg_type, None)
    try:
        assert handler is not None, "Bad Message Type: %r" % msg_type
    except:
        content = error.wrap_exception()
        self.log.error("Bad Message Type: %r", msg_type, exc_info=True)
        self.session.send(self.query, "hub_error", ident=client_id,
                          content=content)
        return
    else:
        handler(idents, msg)
python
{ "resource": "" }
q280513
Hub.handle_new_heart
test
def handle_new_heart(self, heart):
    """handler to attach to heartbeater.
    Called when a new heart starts to beat.
    Triggers completion of registration."""
    self.log.debug("heartbeat::handle_new_heart(%r)", heart)
    if heart not in self.incoming_registrations:
        self.log.info("heartbeat::ignoring new heart: %r", heart)
    else:
        self.finish_registration(heart)
python
{ "resource": "" }
q280514
Hub.handle_heart_failure
test
def handle_heart_failure(self, heart):
    """handler to attach to heartbeater.
    called when a previously registered heart fails to respond
    to beat request.
    triggers unregistration"""
    self.log.debug("heartbeat::handle_heart_failure(%r)", heart)
    eid = self.hearts.get(heart, None)
    if eid is None or self.keytable[eid] in self.dead_engines:
        self.log.info("heartbeat::ignoring heart failure %r (not an engine or already dead)", heart)
    else:
        # only look up the queue once eid is known to be a live engine;
        # self.engines[None] would raise KeyError for unknown hearts
        queue = self.engines[eid].queue
        self.unregister_engine(heart, dict(content=dict(id=eid, queue=queue)))
python
{ "resource": "" }
q280515
Hub.save_task_request
test
def save_task_request(self, idents, msg):
    """Save the submission of a task."""
    client_id = idents[0]
    try:
        msg = self.session.unserialize(msg)
    except Exception:
        self.log.error("task::client %r sent invalid task message: %r",
                       client_id, msg, exc_info=True)
        return
    record = init_record(msg)

    record['client_uuid'] = client_id.decode('ascii')
    record['queue'] = 'task'
    header = msg['header']
    msg_id = header['msg_id']
    self.pending.add(msg_id)
    self.unassigned.add(msg_id)
    try:
        # it's possible iopub arrived first:
        existing = self.db.get_record(msg_id)
        if existing['resubmitted']:
            for key in ('submitted', 'client_uuid', 'buffers'):
                # don't clobber these keys on resubmit
                # submitted and client_uuid should be different
                # and buffers might be big, and shouldn't have changed
                record.pop(key)
                # still check content,header which should not change
                # but are not expensive to compare as buffers

        for key, evalue in existing.iteritems():
            if key.endswith('buffers'):
                # don't compare buffers
                continue
            rvalue = record.get(key, None)
            if evalue and rvalue and evalue != rvalue:
                self.log.warn("conflicting initial state for record: %r:%r <%r> %r",
                              msg_id, rvalue, key, evalue)
            elif evalue and not rvalue:
                record[key] = evalue
        try:
            self.db.update_record(msg_id, record)
        except Exception:
            self.log.error("DB Error updating record %r", msg_id, exc_info=True)
    except KeyError:
        try:
            self.db.add_record(msg_id, record)
        except Exception:
            self.log.error("DB Error adding record %r", msg_id, exc_info=True)
    except Exception:
        self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
python
{ "resource": "" }
q280516
Hub.save_task_result
test
def save_task_result(self, idents, msg):
    """save the result of a completed task."""
    client_id = idents[0]
    try:
        msg = self.session.unserialize(msg)
    except Exception:
        self.log.error("task::invalid task result message sent to %r: %r",
                       client_id, msg, exc_info=True)
        return

    parent = msg['parent_header']
    if not parent:
        # print msg
        self.log.warn("Task %r had no parent!", msg)
        return
    msg_id = parent['msg_id']
    if msg_id in self.unassigned:
        self.unassigned.remove(msg_id)

    header = msg['header']
    engine_uuid = header.get('engine', u'')
    eid = self.by_ident.get(cast_bytes(engine_uuid), None)
    status = header.get('status', None)

    if msg_id in self.pending:
        self.log.info("task::task %r finished on %s", msg_id, eid)
        self.pending.remove(msg_id)
        self.all_completed.add(msg_id)
        if eid is not None:
            if status != 'aborted':
                self.completed[eid].append(msg_id)
            if msg_id in self.tasks[eid]:
                self.tasks[eid].remove(msg_id)
        completed = header['date']
        started = header.get('started', None)
        result = {
            'result_header': header,
            'result_content': msg['content'],
            'started': started,
            'completed': completed,
            'received': datetime.now(),
            'engine_uuid': engine_uuid,
        }
        result['result_buffers'] = msg['buffers']
        try:
            self.db.update_record(msg_id, result)
        except Exception:
            self.log.error("DB Error saving task request %r", msg_id, exc_info=True)
    else:
        self.log.debug("task::unknown task %r finished", msg_id)
python
{ "resource": "" }
q280517
Hub.save_iopub_message
test
def save_iopub_message(self, topics, msg):
    """save an iopub message into the db"""
    # print (topics)
    try:
        msg = self.session.unserialize(msg, content=True)
    except Exception:
        self.log.error("iopub::invalid IOPub message", exc_info=True)
        return

    parent = msg['parent_header']
    if not parent:
        self.log.warn("iopub::IOPub message lacks parent: %r", msg)
        return
    msg_id = parent['msg_id']
    msg_type = msg['header']['msg_type']
    content = msg['content']

    # ensure msg_id is in db
    try:
        rec = self.db.get_record(msg_id)
    except KeyError:
        rec = empty_record()
        rec['msg_id'] = msg_id
        self.db.add_record(msg_id, rec)

    # stream
    d = {}
    if msg_type == 'stream':
        name = content['name']
        s = rec[name] or ''
        d[name] = s + content['data']
    elif msg_type == 'pyerr':
        d['pyerr'] = content
    elif msg_type == 'pyin':
        d['pyin'] = content['code']
    elif msg_type in ('display_data', 'pyout'):
        d[msg_type] = content
    elif msg_type == 'status':
        pass
    else:
        self.log.warn("unhandled iopub msg_type: %r", msg_type)

    if not d:
        return

    try:
        self.db.update_record(msg_id, d)
    except Exception:
        self.log.error("DB Error saving iopub message %r", msg_id, exc_info=True)
python
{ "resource": "" }
q280518
Hub.connection_request
test
def connection_request(self, client_id, msg):
    """Reply with connection addresses for clients."""
    self.log.info("client::client %r connected", client_id)
    content = dict(status='ok')
    content.update(self.client_info)
    jsonable = {}
    for k, v in self.keytable.iteritems():
        if v not in self.dead_engines:
            jsonable[str(k)] = v.decode('ascii')
    content['engines'] = jsonable
    self.session.send(self.query, 'connection_reply', content,
                      parent=msg, ident=client_id)
python
{ "resource": "" }
q280519
Hub.register_engine
test
def register_engine(self, reg, msg):
    """Register a new engine, and create the socket(s) necessary."""
    content = msg['content']
    try:
        queue = cast_bytes(content['queue'])
    except KeyError:
        self.log.error("registration::queue not specified", exc_info=True)
        return

    heart = content.get('heartbeat', None)
    if heart:
        heart = cast_bytes(heart)

    eid = self._next_id
    # print (eid, queue, reg, heart)
    self.log.debug("registration::register_engine(%i, %r, %r, %r)",
                   eid, queue, reg, heart)

    content = dict(id=eid, status='ok')
    content.update(self.engine_info)
    # check if requesting available IDs:
    if queue in self.by_ident:
        try:
            raise KeyError("queue_id %r in use" % queue)
        except:
            content = error.wrap_exception()
            self.log.error("queue_id %r in use", queue, exc_info=True)
    elif heart in self.hearts:  # need to check unique hearts?
        try:
            raise KeyError("heart_id %r in use" % heart)
        except:
            self.log.error("heart_id %r in use", heart, exc_info=True)
            content = error.wrap_exception()
    else:
        for h, pack in self.incoming_registrations.iteritems():
            if heart == h:
                try:
                    raise KeyError("heart_id %r in use" % heart)
                except:
                    self.log.error("heart_id %r in use", heart, exc_info=True)
                    content = error.wrap_exception()
                break
            elif queue == pack[1]:
                try:
                    raise KeyError("queue_id %r in use" % queue)
                except:
                    self.log.error("queue_id %r in use", queue, exc_info=True)
                    content = error.wrap_exception()
                break

    msg = self.session.send(self.query, "registration_reply",
                            content=content, ident=reg)

    if content['status'] == 'ok':
        if heart in self.heartmonitor.hearts:
            # already beating
            self.incoming_registrations[heart] = (eid, queue, reg[0], None)
            self.finish_registration(heart)
        else:
            purge = lambda: self._purge_stalled_registration(heart)
            dc = ioloop.DelayedCallback(purge, self.registration_timeout, self.loop)
            dc.start()
            self.incoming_registrations[heart] = (eid, queue, reg[0], dc)
    else:
        self.log.error("registration::registration %i failed: %r",
                       eid, content['evalue'])
    return eid
python
{ "resource": "" }
q280520
Hub.unregister_engine
test
def unregister_engine(self, ident, msg):
    """Unregister an engine that explicitly requested to leave."""
    try:
        eid = msg['content']['id']
    except:
        self.log.error("registration::bad engine id for unregistration: %r",
                       ident, exc_info=True)
        return
    self.log.info("registration::unregister_engine(%r)", eid)
    # print (eid)
    uuid = self.keytable[eid]
    content = dict(id=eid, queue=uuid.decode('ascii'))
    self.dead_engines.add(uuid)
    # self.ids.remove(eid)
    # uuid = self.keytable.pop(eid)
    #
    # ec = self.engines.pop(eid)
    # self.hearts.pop(ec.heartbeat)
    # self.by_ident.pop(ec.queue)
    # self.completed.pop(eid)
    handleit = lambda: self._handle_stranded_msgs(eid, uuid)
    dc = ioloop.DelayedCallback(handleit, self.registration_timeout, self.loop)
    dc.start()
    ############## TODO: HANDLE IT ################
    if self.notifier:
        self.session.send(self.notifier, "unregistration_notification", content=content)
python
{ "resource": "" }
q280521
Hub.finish_registration
test
def finish_registration(self, heart):
    """Second half of engine registration, called after our
    HeartMonitor has received a beat from the Engine's Heart."""
    try:
        (eid, queue, reg, purge) = self.incoming_registrations.pop(heart)
    except KeyError:
        self.log.error("registration::tried to finish nonexistent registration",
                       exc_info=True)
        return
    self.log.info("registration::finished registering engine %i:%r", eid, queue)
    if purge is not None:
        purge.stop()
    control = queue
    self.ids.add(eid)
    self.keytable[eid] = queue
    self.engines[eid] = EngineConnector(id=eid, queue=queue, registration=reg,
                                        control=control, heartbeat=heart)
    self.by_ident[queue] = eid
    self.queues[eid] = list()
    self.tasks[eid] = list()
    self.completed[eid] = list()
    self.hearts[heart] = eid
    content = dict(id=eid, queue=self.engines[eid].queue.decode('ascii'))
    if self.notifier:
        self.session.send(self.notifier, "registration_notification", content=content)
    self.log.info("engine::Engine Connected: %i", eid)
python
{ "resource": "" }
q280522
Hub.shutdown_request
test
def shutdown_request(self, client_id, msg):
    """handle shutdown request."""
    self.session.send(self.query, 'shutdown_reply',
                      content={'status': 'ok'}, ident=client_id)
    # also notify other clients of shutdown
    self.session.send(self.notifier, 'shutdown_notice',
                      content={'status': 'ok'})
    dc = ioloop.DelayedCallback(lambda: self._shutdown(), 1000, self.loop)
    dc.start()
python
{ "resource": "" }
q280523
Hub.purge_results
test
def purge_results(self, client_id, msg):
    """Purge results from memory. This method is more valuable before we move
    to a DB based message storage mechanism."""
    content = msg['content']
    self.log.info("Dropping records with %s", content)
    msg_ids = content.get('msg_ids', [])
    reply = dict(status='ok')
    if msg_ids == 'all':
        try:
            self.db.drop_matching_records(dict(completed={'$ne': None}))
        except Exception:
            reply = error.wrap_exception()
    else:
        pending = filter(lambda m: m in self.pending, msg_ids)
        if pending:
            try:
                raise IndexError("msg pending: %r" % pending[0])
            except:
                reply = error.wrap_exception()
        else:
            try:
                self.db.drop_matching_records(dict(msg_id={'$in': msg_ids}))
            except Exception:
                reply = error.wrap_exception()

        if reply['status'] == 'ok':
            eids = content.get('engine_ids', [])
            for eid in eids:
                if eid not in self.engines:
                    try:
                        raise IndexError("No such engine: %i" % eid)
                    except:
                        reply = error.wrap_exception()
                    break
                uid = self.engines[eid].queue
                try:
                    self.db.drop_matching_records(dict(engine_uuid=uid,
                                                       completed={'$ne': None}))
                except Exception:
                    reply = error.wrap_exception()
                    break

    self.session.send(self.query, 'purge_reply', content=reply, ident=client_id)
python
{ "resource": "" }
q280524
Hub._extract_record
test
def _extract_record(self, rec):
    """decompose a TaskRecord dict into subsection of reply for get_result"""
    io_dict = {}
    for key in ('pyin', 'pyout', 'pyerr', 'stdout', 'stderr'):
        io_dict[key] = rec[key]
    content = {
        'result_content': rec['result_content'],
        'header': rec['header'],
        'result_header': rec['result_header'],
        'received': rec['received'],
        'io': io_dict,
    }
    if rec['result_buffers']:
        buffers = map(bytes, rec['result_buffers'])
    else:
        buffers = []
    return content, buffers
python
{ "resource": "" }
q280525
Hub.get_results
test
def get_results(self, client_id, msg):
    """Get the result of 1 or more messages."""
    content = msg['content']
    msg_ids = sorted(set(content['msg_ids']))
    statusonly = content.get('status_only', False)
    pending = []
    completed = []
    content = dict(status='ok')
    content['pending'] = pending
    content['completed'] = completed
    buffers = []
    if not statusonly:
        try:
            matches = self.db.find_records(dict(msg_id={'$in': msg_ids}))
            # turn match list into dict, for faster lookup
            records = {}
            for rec in matches:
                records[rec['msg_id']] = rec
        except Exception:
            content = error.wrap_exception()
            self.session.send(self.query, "result_reply", content=content,
                              parent=msg, ident=client_id)
            return
    else:
        records = {}
    for msg_id in msg_ids:
        if msg_id in self.pending:
            pending.append(msg_id)
        elif msg_id in self.all_completed:
            completed.append(msg_id)
            if not statusonly:
                c, bufs = self._extract_record(records[msg_id])
                content[msg_id] = c
                buffers.extend(bufs)
        elif msg_id in records:
            # look the record up by msg_id; the loop variable `rec` from
            # the dict-building loop above would be stale here
            rec = records[msg_id]
            if rec['completed']:
                completed.append(msg_id)
                c, bufs = self._extract_record(records[msg_id])
                content[msg_id] = c
                buffers.extend(bufs)
            else:
                pending.append(msg_id)
        else:
            try:
                raise KeyError('No such message: ' + msg_id)
            except:
                content = error.wrap_exception()
            break
    self.session.send(self.query, "result_reply", content=content,
                      parent=msg, ident=client_id,
                      buffers=buffers)
python
{ "resource": "" }
q280526
Hub.get_history
test
def get_history(self, client_id, msg):
    """Get a list of all msg_ids in our DB records"""
    try:
        msg_ids = self.db.get_history()
    except Exception as e:
        content = error.wrap_exception()
    else:
        content = dict(status='ok', history=msg_ids)
    self.session.send(self.query, "history_reply", content=content,
                      parent=msg, ident=client_id)
python
{ "resource": "" }
q280527
Hub.db_query
test
def db_query(self, client_id, msg):
    """Perform a raw query on the task record database."""
    content = msg['content']
    query = content.get('query', {})
    keys = content.get('keys', None)
    buffers = []
    empty = list()
    try:
        records = self.db.find_records(query, keys)
    except Exception as e:
        content = error.wrap_exception()
    else:
        # extract buffers from reply content:
        if keys is not None:
            buffer_lens = [] if 'buffers' in keys else None
            result_buffer_lens = [] if 'result_buffers' in keys else None
        else:
            buffer_lens = None
            result_buffer_lens = None

        for rec in records:
            # buffers may be None, so double check
            b = rec.pop('buffers', empty) or empty
            if buffer_lens is not None:
                buffer_lens.append(len(b))
                buffers.extend(b)
            rb = rec.pop('result_buffers', empty) or empty
            if result_buffer_lens is not None:
                result_buffer_lens.append(len(rb))
                buffers.extend(rb)
        content = dict(status='ok', records=records, buffer_lens=buffer_lens,
                       result_buffer_lens=result_buffer_lens)
    # self.log.debug (content)
    self.session.send(self.query, "db_reply", content=content,
                      parent=msg, ident=client_id,
                      buffers=buffers)
python
{ "resource": "" }
q280528
Rscript.cd
test
@contextmanager   # contextlib.contextmanager; cd() is used as `with self.cd(...)` in run_command_under_r_root
def cd(self, newdir):
    """ go to the path """
    prevdir = os.getcwd()
    os.chdir(newdir)
    try:
        yield
    finally:
        os.chdir(prevdir)
python
{ "resource": "" }
q280529
Rscript.decode_cmd_out
test
def decode_cmd_out(self, completed_cmd):
    """ return a standard message """
    try:
        stdout = completed_cmd.stdout.encode('utf-8').decode()
    except AttributeError:
        try:
            stdout = str(bytes(completed_cmd.stdout), 'big5').strip()
        except AttributeError:
            stdout = str(bytes(completed_cmd.stdout).decode('utf-8')).strip()
    try:
        stderr = completed_cmd.stderr.encode('utf-8').decode()
    except AttributeError:
        try:
            stderr = str(bytes(completed_cmd.stderr), 'big5').strip()
        except AttributeError:
            stderr = str(bytes(completed_cmd.stderr).decode('utf-8')).strip()
    return ParsedCompletedCommand(
        completed_cmd.returncode,
        completed_cmd.args,
        stdout,
        stderr
    )
python
{ "resource": "" }
q280530
Rscript.run_command_under_r_root
test
def run_command_under_r_root(self, cmd, catched=True):
    """ subprocess run on here """
    RPATH = self.path
    with self.cd(newdir=RPATH):
        if catched:
            process = sp.run(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        else:
            process = sp.run(cmd)
    return process
python
{ "resource": "" }
q280531
Rscript.execute
test
def execute(self):
    """ Execute R script """
    rprocess = OrderedDict()
    commands = OrderedDict([
        (self.file, ['Rscript', self.file] + self.cmd),
    ])
    for cmd_name, cmd in commands.items():
        rprocess[cmd_name] = self.run_command_under_r_root(cmd)
    return self.decode_cmd_out(completed_cmd=rprocess[self.file])
python
{ "resource": "" }
q280532
BaseFrontendMixin._dispatch
test
def _dispatch(self, msg):
    """ Calls the frontend handler associated with the message type of the
        given message.
    """
    msg_type = msg['header']['msg_type']
    handler = getattr(self, '_handle_' + msg_type, None)
    if handler:
        handler(msg)
python
{ "resource": "" }
q280533
BaseFrontendMixin._is_from_this_session
test
def _is_from_this_session(self, msg):
    """ Returns whether a reply from the kernel originated from a request
        from this frontend.
    """
    session = self._kernel_manager.session.session
    parent = msg['parent_header']
    if not parent:
        # if the message has no parent, assume it is meant for all frontends
        return True
    else:
        return parent.get('session') == session
python
{ "resource": "" }
q280534
AnnotateReporter.report
test
def report(self, morfs, directory=None):
    """Run the report.

    See `coverage.report()` for arguments.
    """
    self.report_files(self.annotate_file, morfs, directory)
python
{ "resource": "" }
q280535
AnnotateReporter.annotate_file
test
def annotate_file(self, cu, analysis):
    """Annotate a single file.

    `cu` is the CodeUnit for the file to annotate.
    """
    if not cu.relative:
        return
    filename = cu.filename
    source = cu.source_file()
    if self.directory:
        dest_file = os.path.join(self.directory, cu.flat_rootname())
        dest_file += ".py,cover"
    else:
        dest_file = filename + ",cover"
    dest = open(dest_file, 'w')

    statements = sorted(analysis.statements)
    missing = sorted(analysis.missing)
    excluded = sorted(analysis.excluded)

    lineno = 0
    i = 0
    j = 0
    covered = True
    while True:
        line = source.readline()
        if line == '':
            break
        lineno += 1
        while i < len(statements) and statements[i] < lineno:
            i += 1
        while j < len(missing) and missing[j] < lineno:
            j += 1
        if i < len(statements) and statements[i] == lineno:
            covered = j >= len(missing) or missing[j] > lineno
        if self.blank_re.match(line):
            dest.write('  ')
        elif self.else_re.match(line):
            # Special logic for lines containing only 'else:'.
            if i >= len(statements) and j >= len(missing):
                dest.write('! ')
            elif i >= len(statements) or j >= len(missing):
                dest.write('> ')
            elif statements[i] == missing[j]:
                dest.write('! ')
            else:
                dest.write('> ')
        elif lineno in excluded:
            dest.write('- ')
        elif covered:
            dest.write('> ')
        else:
            dest.write('! ')
        dest.write(line)
    source.close()
    dest.close()
python
{ "resource": "" }
q280536
get_installed_version
test
def get_installed_version(name):
    '''
    returns installed package version and None if package is not installed
    '''
    pattern = re.compile(r'''Installed:\s+(?P<version>.*)''')
    cmd = 'apt-cache policy %s' % name
    args = shlex.split(cmd)
    try:
        output = subprocess.check_output(args)
        if not output:
            return None
    except CalledProcessError:
        return None
    # check output
    match = pattern.search(output)
    if match:
        version = match.groupdict()['version']
        if version == '(none)':
            return None
        else:
            return version
python
{ "resource": "" }
q280537
squash_unicode
test
def squash_unicode(obj):
    """coerce unicode back to bytestrings."""
    if isinstance(obj, dict):
        for key in obj.keys():
            obj[key] = squash_unicode(obj[key])
            if isinstance(key, unicode):
                obj[squash_unicode(key)] = obj.pop(key)
    elif isinstance(obj, list):
        for i, v in enumerate(obj):
            obj[i] = squash_unicode(v)
    elif isinstance(obj, unicode):
        obj = obj.encode('utf8')
    return obj
python
{ "resource": "" }
q280538
extract_header
test
def extract_header(msg_or_header):
    """Given a message or header, return the header."""
    if not msg_or_header:
        return {}
    try:
        # See if msg_or_header is the entire message.
        h = msg_or_header['header']
    except KeyError:
        try:
            # See if msg_or_header is just the header
            h = msg_or_header['msg_id']
        except KeyError:
            raise
        else:
            h = msg_or_header
    if not isinstance(h, dict):
        h = dict(h)
    return h
python
{ "resource": "" }
q280539
Session._check_packers
test
def _check_packers(self):
    """check packers for binary data and datetime support."""
    pack = self.pack
    unpack = self.unpack

    # check simple serialization
    msg = dict(a=[1, 'hi'])
    try:
        packed = pack(msg)
    except Exception:
        raise ValueError("packer could not serialize a simple message")

    # ensure packed message is bytes
    if not isinstance(packed, bytes):
        raise ValueError("message packed to %r, but bytes are required" % type(packed))

    # check that unpack is pack's inverse
    try:
        unpacked = unpack(packed)
    except Exception:
        raise ValueError("unpacker could not handle the packer's output")

    # check datetime support
    msg = dict(t=datetime.now())
    try:
        unpacked = unpack(pack(msg))
    except Exception:
        self.pack = lambda o: pack(squash_dates(o))
        self.unpack = lambda s: extract_dates(unpack(s))
python
{ "resource": "" }
q280540
Session.msg
test
def msg(self, msg_type, content=None, parent=None, subheader=None, header=None):
    """Return the nested message dict.

    This format is different from what is sent over the wire. The
    serialize/unserialize methods convert this nested message dict to the wire
    format, which is a list of message parts.
    """
    msg = {}
    header = self.msg_header(msg_type) if header is None else header
    msg['header'] = header
    msg['msg_id'] = header['msg_id']
    msg['msg_type'] = header['msg_type']
    msg['parent_header'] = {} if parent is None else extract_header(parent)
    msg['content'] = {} if content is None else content
    sub = {} if subheader is None else subheader
    msg['header'].update(sub)
    return msg
python
{ "resource": "" }
q280541
Session.sign
test
def sign(self, msg_list):
    """Sign a message with HMAC digest. If no auth, return b''.

    Parameters
    ----------
    msg_list : list
        The [p_header,p_parent,p_content] part of the message list.
    """
    if self.auth is None:
        return b''
    h = self.auth.copy()
    for m in msg_list:
        h.update(m)
    return str_to_bytes(h.hexdigest())
python
{ "resource": "" }
q280542
Session.serialize
test
def serialize(self, msg, ident=None):
    """Serialize the message components to bytes.

    This is roughly the inverse of unserialize. The serialize/unserialize
    methods work with full message lists, whereas pack/unpack work with
    the individual message parts in the message list.

    Parameters
    ----------
    msg : dict or Message
        The nested message dict as returned by the self.msg method.

    Returns
    -------
    msg_list : list
        The list of bytes objects to be sent with the format:
        [ident1, ident2, ..., DELIM, HMAC, p_header, p_parent, p_content,
        buffer1, buffer2, ...]. In this list, the p_* entities are the
        packed or serialized versions, so if JSON is used, these are utf8
        encoded JSON strings.
    """
    content = msg.get('content', {})
    if content is None:
        content = self.none
    elif isinstance(content, dict):
        content = self.pack(content)
    elif isinstance(content, bytes):
        # content is already packed, as in a relayed message
        pass
    elif isinstance(content, unicode):
        # should be bytes, but JSON often spits out unicode
        content = content.encode('utf8')
    else:
        raise TypeError("Content incorrect type: %s" % type(content))

    real_message = [self.pack(msg['header']),
                    self.pack(msg['parent_header']),
                    content,
                    ]

    to_send = []

    if isinstance(ident, list):
        # accept list of idents
        to_send.extend(ident)
    elif ident is not None:
        to_send.append(ident)
    to_send.append(DELIM)

    signature = self.sign(real_message)
    to_send.append(signature)

    to_send.extend(real_message)

    return to_send
python
{ "resource": "" }
q280543
Session.send
test
def send(self, stream, msg_or_type, content=None, parent=None, ident=None,
         buffers=None, subheader=None, track=False, header=None):
    """Build and send a message via stream or socket.

    The message format used by this function internally is as follows:

    [ident1, ident2, ..., DELIM, HMAC, p_header, p_parent, p_content,
     buffer1, buffer2, ...]

    The serialize/unserialize methods convert the nested message dict into
    this format.

    Parameters
    ----------
    stream : zmq.Socket or ZMQStream
        The socket-like object used to send the data.
    msg_or_type : str or Message/dict
        Normally, msg_or_type will be a msg_type unless a message is being
        sent more than once. If a header is supplied, this can be set to
        None and the msg_type will be pulled from the header.
    content : dict or None
        The content of the message (ignored if msg_or_type is a message).
    header : dict or None
        The header dict for the message (ignored if msg_or_type is a message).
    parent : Message or dict or None
        The parent or parent header describing the parent of this message
        (ignored if msg_or_type is a message).
    ident : bytes or list of bytes
        The zmq.IDENTITY routing path.
    subheader : dict or None
        Extra header keys for this message's header (ignored if msg_or_type
        is a message).
    buffers : list or None
        The already-serialized buffers to be appended to the message.
    track : bool
        Whether to track. Only for use with Sockets, because ZMQStream
        objects cannot track messages.

    Returns
    -------
    msg : dict
        The constructed message.
    (msg, tracker) : (dict, MessageTracker)
        if track=True, then a 2-tuple will be returned, the first element
        being the constructed message, and the second being the
        MessageTracker
    """
    if not isinstance(stream, (zmq.Socket, ZMQStream)):
        raise TypeError("stream must be Socket or ZMQStream, not %r" % type(stream))
    elif track and isinstance(stream, ZMQStream):
        raise TypeError("ZMQStream cannot track messages")

    if isinstance(msg_or_type, (Message, dict)):
        # We got a Message or message dict, not a msg_type so don't
        # build a new Message.
        msg = msg_or_type
    else:
        msg = self.msg(msg_or_type, content=content, parent=parent,
                       subheader=subheader, header=header)

    buffers = [] if buffers is None else buffers
    to_send = self.serialize(msg, ident)
    flag = 0
    if buffers:
        flag = zmq.SNDMORE
        _track = False
    else:
        _track = track
    if track:
        tracker = stream.send_multipart(to_send, flag, copy=False, track=_track)
    else:
        tracker = stream.send_multipart(to_send, flag, copy=False)
    for b in buffers[:-1]:
        stream.send(b, flag, copy=False)
    if buffers:
        if track:
            tracker = stream.send(buffers[-1], copy=False, track=track)
        else:
            tracker = stream.send(buffers[-1], copy=False)

    # omsg = Message(msg)
    if self.debug:
        pprint.pprint(msg)
        pprint.pprint(to_send)
        pprint.pprint(buffers)

    msg['tracker'] = tracker

    return msg
python
{ "resource": "" }
q280544
Session.send_raw
test
def send_raw(self, stream, msg_list, flags=0, copy=True, ident=None):
    """Send a raw message via ident path.

    This method is used to send an already serialized message.

    Parameters
    ----------
    stream : ZMQStream or Socket
        The ZMQ stream or socket to use for sending the message.
    msg_list : list
        The serialized list of messages to send. This only includes the
        [p_header, p_parent, p_content, buffer1, buffer2, ...] portion of
        the message.
    ident : ident or list
        A single ident or a list of idents to use in sending.
    """
    to_send = []
    if isinstance(ident, bytes):
        ident = [ident]
    if ident is not None:
        to_send.extend(ident)

    to_send.append(DELIM)
    to_send.append(self.sign(msg_list))
    to_send.extend(msg_list)
    # send the fully assembled wire message (idents + DELIM + HMAC + parts),
    # not the bare msg_list
    stream.send_multipart(to_send, flags, copy=copy)
python
{ "resource": "" }
q280545
Session.recv
test
def recv(self, socket, mode=zmq.NOBLOCK, content=True, copy=True):
    """Receive and unpack a message.

    Parameters
    ----------
    socket : ZMQStream or Socket
        The socket or stream to use in receiving.

    Returns
    -------
    [idents], msg
        [idents] is a list of idents and msg is a nested message dict of
        same format as self.msg returns.
    """
    if isinstance(socket, ZMQStream):
        socket = socket.socket
    try:
        msg_list = socket.recv_multipart(mode, copy=copy)
    except zmq.ZMQError as e:
        if e.errno == zmq.EAGAIN:
            # We can convert EAGAIN to None as we know in this case
            # recv_multipart won't return None.
            return None, None
        else:
            raise
    # split multipart message into identity list and message dict
    # invalid large messages can cause very expensive string comparisons
    idents, msg_list = self.feed_identities(msg_list, copy)
    try:
        return idents, self.unserialize(msg_list, content=content, copy=copy)
    except Exception as e:
        # TODO: handle it
        raise e
python
{ "resource": "" }
q280546
Session.feed_identities
test
def feed_identities(self, msg_list, copy=True):
    """Split the identities from the rest of the message.

    Feed until DELIM is reached, then return the prefix as idents and
    remainder as msg_list. This is easily broken by setting an IDENT to DELIM,
    but that would be silly.

    Parameters
    ----------
    msg_list : a list of Message or bytes objects
        The message to be split.
    copy : bool
        flag determining whether the arguments are bytes or Messages

    Returns
    -------
    (idents, msg_list) : two lists
        idents will always be a list of bytes, each of which is a ZMQ
        identity. msg_list will be a list of bytes or zmq.Messages of the
        form [HMAC, p_header, p_parent, p_content, buffer1, buffer2, ...]
        and should be unpackable/unserializable via self.unserialize at
        this point.
    """
    if copy:
        idx = msg_list.index(DELIM)
        return msg_list[:idx], msg_list[idx + 1:]
    else:
        failed = True
        for idx, m in enumerate(msg_list):
            if m.bytes == DELIM:
                failed = False
                break
        if failed:
            raise ValueError("DELIM not in msg_list")
        idents, msg_list = msg_list[:idx], msg_list[idx + 1:]
        return [m.bytes for m in idents], msg_list
python
{ "resource": "" }
q280547
Session.unserialize
test
def unserialize(self, msg_list, content=True, copy=True):
    """Unserialize a msg_list to a nested message dict.

    This is roughly the inverse of serialize. The serialize/unserialize
    methods work with full message lists, whereas pack/unpack work with
    the individual message parts in the message list.

    Parameters
    ----------
    msg_list : list of bytes or Message objects
        The list of message parts of the form [HMAC, p_header, p_parent,
        p_content, buffer1, buffer2, ...].
    content : bool (True)
        Whether to unpack the content dict (True), or leave it packed
        (False).
    copy : bool (True)
        Whether to return the bytes (True), or the non-copying Message
        object in each place (False).

    Returns
    -------
    msg : dict
        The nested message dict with top-level keys [header, parent_header,
        content, buffers].
    """
    minlen = 4
    message = {}
    if not copy:
        for i in range(minlen):
            msg_list[i] = msg_list[i].bytes
    if self.auth is not None:
        signature = msg_list[0]
        if not signature:
            raise ValueError("Unsigned Message")
        if signature in self.digest_history:
            raise ValueError("Duplicate Signature: %r" % signature)
        self.digest_history.add(signature)
        check = self.sign(msg_list[1:4])
        if not signature == check:
            raise ValueError("Invalid Signature: %r" % signature)
    if not len(msg_list) >= minlen:
        raise TypeError("malformed message, must have at least %i elements" % minlen)
    header = self.unpack(msg_list[1])
    message['header'] = header
    message['msg_id'] = header['msg_id']
    message['msg_type'] = header['msg_type']
    message['parent_header'] = self.unpack(msg_list[2])
    if content:
        message['content'] = self.unpack(msg_list[3])
    else:
        message['content'] = msg_list[3]

    message['buffers'] = msg_list[4:]
    return message
python
{ "resource": "" }
q280548
save_svg
test
def save_svg(string, parent=None):
    """ Prompts the user to save an SVG document to disk.

    Parameters
    ----------
    string : basestring
        A Python string containing a SVG document.
    parent : QWidget, optional
        The parent to use for the file dialog.

    Returns
    -------
    The name of the file to which the document was saved, or None if the
    save was cancelled.
    """
    if isinstance(string, unicode):
        string = string.encode('utf-8')

    dialog = QtGui.QFileDialog(parent, 'Save SVG Document')
    dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
    dialog.setDefaultSuffix('svg')
    dialog.setNameFilter('SVG document (*.svg)')
    if dialog.exec_():
        filename = dialog.selectedFiles()[0]
        f = open(filename, 'w')
        try:
            f.write(string)
        finally:
            f.close()
        return filename
    return None
python
{ "resource": "" }
q280549
svg_to_clipboard
test
def svg_to_clipboard(string):
    """ Copy a SVG document to the clipboard.

    Parameters
    ----------
    string : basestring
        A Python string containing a SVG document.
    """
    if isinstance(string, unicode):
        string = string.encode('utf-8')

    mime_data = QtCore.QMimeData()
    mime_data.setData('image/svg+xml', string)
    QtGui.QApplication.clipboard().setMimeData(mime_data)
python
{ "resource": "" }
q280550
svg_to_image
test
def svg_to_image(string, size=None):
    """ Convert a SVG document to a QImage.

    Parameters
    ----------
    string : basestring
        A Python string containing a SVG document.
    size : QSize, optional
        The size of the image that is produced. If not specified, the SVG
        document's default size is used.

    Raises
    ------
    ValueError
        If an invalid SVG string is provided.

    Returns
    -------
    A QImage of format QImage.Format_ARGB32.
    """
    if isinstance(string, unicode):
        string = string.encode('utf-8')

    renderer = QtSvg.QSvgRenderer(QtCore.QByteArray(string))
    if not renderer.isValid():
        raise ValueError('Invalid SVG data.')

    if size is None:
        size = renderer.defaultSize()
    image = QtGui.QImage(size, QtGui.QImage.Format_ARGB32)
    painter = QtGui.QPainter(image)
    renderer.render(painter)
    return image
python
{ "resource": "" }
q280551
object_info
test
def object_info(**kw):
    """Make an object info dict with all fields present."""
    infodict = dict(izip_longest(info_fields, [None]))
    infodict.update(kw)
    return infodict
python
{ "resource": "" }
q280552
getdoc
test
def getdoc(obj):
    """Stable wrapper around inspect.getdoc.

    This can't crash because of attribute problems.

    It also attempts to call a getdoc() method on the given object. This
    allows objects which provide their docstrings via non-standard mechanisms
    (like Pyro proxies) to still be inspected by ipython's ? system."""
    # Allow objects to offer customized documentation via a getdoc method:
    try:
        ds = obj.getdoc()
    except Exception:
        pass
    else:
        # if we get extra info, we add it to the normal docstring.
        if isinstance(ds, basestring):
            return inspect.cleandoc(ds)
    try:
        return inspect.getdoc(obj)
    except Exception:
        # Harden against an inspect failure, which can occur with
        # SWIG-wrapped extensions.
        return None
python
{ "resource": "" }
q280553
getsource
test
def getsource(obj, is_binary=False):
    """Wrapper around inspect.getsource.

    This can be modified by other projects to provide customized source
    extraction.

    Inputs:

    - obj: an object whose source code we will attempt to extract.

    Optional inputs:

    - is_binary: whether the object is known to come from a binary source.
      This implementation will skip returning any output for binary objects,
      but custom extractors may know how to meaningfully process them."""
    if is_binary:
        return None
    else:
        # get source if obj was decorated with @decorator
        if hasattr(obj, "__wrapped__"):
            obj = obj.__wrapped__
        try:
            src = inspect.getsource(obj)
        except TypeError:
            if hasattr(obj, '__class__'):
                src = inspect.getsource(obj.__class__)
        return src
python
{ "resource": "" }
q280554
getargspec
test
def getargspec(obj):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.

    Modified version of inspect.getargspec from the Python Standard
    Library."""
    if inspect.isfunction(obj):
        func_obj = obj
    elif inspect.ismethod(obj):
        func_obj = obj.im_func
    elif hasattr(obj, '__call__'):
        func_obj = obj.__call__
    else:
        raise TypeError('arg is not a Python function')
    args, varargs, varkw = inspect.getargs(func_obj.func_code)
    return args, varargs, varkw, func_obj.func_defaults
python
{ "resource": "" }
q280555
call_tip
test
def call_tip(oinfo, format_call=True):
    """Extract call tip data from an oinfo dict.

    Parameters
    ----------
    oinfo : dict

    format_call : bool, optional
        If True, the call line is formatted and returned as a string. If
        not, a tuple of (name, argspec) is returned.

    Returns
    -------
    call_info : None, str or (str, dict) tuple.
        When format_call is True, the whole call information is formatted
        as a single string. Otherwise, the object's name and its argspec
        dict are returned. If no call information is available, None is
        returned.

    docstring : str or None
        The most relevant docstring for calling purposes is returned, if
        available. The priority is: call docstring for callable instances,
        then constructor docstring for classes, then main object's
        docstring otherwise (regular functions).
    """
    # Get call definition
    argspec = oinfo.get('argspec')
    if argspec is None:
        call_line = None
    else:
        # Callable objects will have 'self' as their first argument, prune
        # it out if it's there for clarity (since users do *not* pass an
        # extra first argument explicitly).
        try:
            has_self = argspec['args'][0] == 'self'
        except (KeyError, IndexError):
            pass
        else:
            if has_self:
                argspec['args'] = argspec['args'][1:]

        call_line = oinfo['name'] + format_argspec(argspec)

    # Now get docstring.
    # The priority is: call docstring, constructor docstring, main one.
    doc = oinfo.get('call_docstring')
    if doc is None:
        doc = oinfo.get('init_docstring')
    if doc is None:
        doc = oinfo.get('docstring', '')

    return call_line, doc
python
{ "resource": "" }
q280556
find_file
test
def find_file(obj):
    """Find the absolute path to the file where an object was defined.

    This is essentially a robust wrapper around `inspect.getabsfile`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    fname : str
        The absolute path to the file where the object was defined.
    """
    # get source if obj was decorated with @decorator
    if hasattr(obj, '__wrapped__'):
        obj = obj.__wrapped__

    fname = None
    try:
        fname = inspect.getabsfile(obj)
    except TypeError:
        # For an instance, the file that matters is where its class was
        # declared.
        if hasattr(obj, '__class__'):
            try:
                fname = inspect.getabsfile(obj.__class__)
            except TypeError:
                # Can happen for builtins
                pass
    except:
        pass
    return fname
python
{ "resource": "" }
q280557
find_source_lines
test
def find_source_lines(obj):
    """Find the line number in a file where an object was defined.

    This is essentially a robust wrapper around `inspect.getsourcelines`.

    Returns None if no file can be found.

    Parameters
    ----------
    obj : any Python object

    Returns
    -------
    lineno : int
        The line number where the object definition starts.
    """
    # get source if obj was decorated with @decorator
    if hasattr(obj, '__wrapped__'):
        obj = obj.__wrapped__

    try:
        try:
            lineno = inspect.getsourcelines(obj)[1]
        except TypeError:
            # For instances, try the class object like getsource() does
            if hasattr(obj, '__class__'):
                lineno = inspect.getsourcelines(obj.__class__)[1]
    except:
        return None

    return lineno
python
{ "resource": "" }
q280558
Inspector._getdef
test
def _getdef(self, obj, oname=''):
    """Return the definition header for any callable object.

    If any exception is generated, None is returned instead and the
    exception is suppressed."""
    try:
        # We need a plain string here, NOT unicode!
        hdef = oname + inspect.formatargspec(*getargspec(obj))
        return py3compat.unicode_to_str(hdef, 'ascii')
    except:
        return None
python
{ "resource": "" }
q280559
Inspector.__head
test
def __head(self, h):
    """Return a header string with proper colors."""
    return '%s%s%s' % (self.color_table.active_colors.header, h,
                       self.color_table.active_colors.normal)
python
{ "resource": "" }
q280560
Inspector.noinfo
test
def noinfo(self, msg, oname):
    """Generic message when no information is found."""
    print 'No %s found' % msg,
    if oname:
        print 'for %s' % oname
    else:
        print
python
{ "resource": "" }
q280561
Inspector.pdef
test
def pdef(self, obj, oname=''):
    """Print the definition header for any callable object.

    If the object is a class, print the constructor information."""

    if not callable(obj):
        print 'Object is not callable.'
        return

    header = ''

    if inspect.isclass(obj):
        header = self.__head('Class constructor information:\n')
        obj = obj.__init__
    elif (not py3compat.PY3) and type(obj) is types.InstanceType:
        obj = obj.__call__

    output = self._getdef(obj, oname)
    if output is None:
        self.noinfo('definition header', oname)
    else:
        print >>io.stdout, header, self.format(output),
python
{ "resource": "" }
q280562
Inspector.pdoc
test
def pdoc(self, obj, oname='', formatter=None):
    """Print the docstring for any object.

    Optional:

    -formatter: a function to run the docstring through for specially
     formatted docstrings.

    Examples
    --------

    In [1]: class NoInit:
       ...:     pass

    In [2]: class NoDoc:
       ...:     def __init__(self):
       ...:         pass

    In [3]: %pdoc NoDoc
    No documentation found for NoDoc

    In [4]: %pdoc NoInit
    No documentation found for NoInit

    In [5]: obj = NoInit()

    In [6]: %pdoc obj
    No documentation found for obj

    In [5]: obj2 = NoDoc()

    In [6]: %pdoc obj2
    No documentation found for obj2
    """

    head = self.__head  # For convenience
    lines = []
    ds = getdoc(obj)
    if formatter:
        ds = formatter(ds)
    if ds:
        lines.append(head("Class Docstring:"))
        lines.append(indent(ds))
    if inspect.isclass(obj) and hasattr(obj, '__init__'):
        init_ds = getdoc(obj.__init__)
        if init_ds is not None:
            lines.append(head("Constructor Docstring:"))
            lines.append(indent(init_ds))
    elif hasattr(obj, '__call__'):
        call_ds = getdoc(obj.__call__)
        if call_ds:
            lines.append(head("Calling Docstring:"))
            lines.append(indent(call_ds))

    if not lines:
        self.noinfo('documentation', oname)
    else:
        page.page('\n'.join(lines))
python
{ "resource": "" }
q280563
Inspector.psource
test
def psource(self, obj, oname=''):
    """Print the source code for an object."""
    # Flush the source cache because inspect can return out-of-date source
    linecache.checkcache()
    try:
        src = getsource(obj)
    except:
        self.noinfo('source', oname)
    else:
        page.page(self.format(py3compat.unicode_to_str(src)))
python
{ "resource": "" }
q280564
Inspector.pfile
test
def pfile(self, obj, oname=''):
    """Show the whole file where an object was defined."""
    lineno = find_source_lines(obj)
    if lineno is None:
        self.noinfo('file', oname)
        return

    ofile = find_file(obj)
    # run contents of file through pager starting at line where the object
    # is defined, as long as the file isn't binary and is actually on the
    # filesystem.
    if ofile.endswith(('.so', '.dll', '.pyd')):
        print 'File %r is binary, not printing.' % ofile
    elif not os.path.isfile(ofile):
        print 'File %r does not exist, not printing.' % ofile
    else:
        # Print only text files, not extension binaries. Note that
        # getsourcelines returns lineno with 1-offset and page() uses
        # 0-offset, so we must adjust.
        page.page(self.format(open(ofile).read()), lineno - 1)
python
{ "resource": "" }
q280565
Inspector._format_fields
test
def _format_fields(self, fields, title_width=12):
    """Formats a list of fields for display.

    Parameters
    ----------
    fields : list
        A list of 2-tuples: (field_title, field_content)
    title_width : int
        How many characters to pad titles to. Default 12.
    """
    out = []
    header = self.__head
    for title, content in fields:
        if len(content.splitlines()) > 1:
            title = header(title + ":") + "\n"
        else:
            title = header((title + ":").ljust(title_width))
        out.append(title + content)
    return "\n".join(out)
python
{ "resource": "" }
q280566
Inspector.pinfo
test
def pinfo(self, obj, oname='', formatter=None, info=None, detail_level=0):
    """Show detailed information about an object.

    Optional arguments:

    - oname: name of the variable pointing to the object.

    - formatter: special formatter for docstrings (see pdoc)

    - info: a structure with some information fields which may have been
      precomputed already.

    - detail_level: if set to 1, more information is given.
    """
    info = self.info(obj, oname=oname, formatter=formatter,
                     info=info, detail_level=detail_level)
    displayfields = []

    def add_fields(fields):
        for title, key in fields:
            field = info[key]
            if field is not None:
                displayfields.append((title, field.rstrip()))

    add_fields(self.pinfo_fields1)

    # Base class for old-style instances
    if (not py3compat.PY3) and isinstance(obj, types.InstanceType) and info['base_class']:
        displayfields.append(("Base Class", info['base_class'].rstrip()))

    add_fields(self.pinfo_fields2)

    # Namespace
    if info['namespace'] != 'Interactive':
        displayfields.append(("Namespace", info['namespace'].rstrip()))

    add_fields(self.pinfo_fields3)

    # Source or docstring, depending on detail level and whether
    # source found.
    if detail_level > 0 and info['source'] is not None:
        displayfields.append(("Source",
                              self.format(py3compat.cast_bytes_py2(info['source']))))
    elif info['docstring'] is not None:
        displayfields.append(("Docstring", info["docstring"]))

    # Constructor info for classes
    if info['isclass']:
        if info['init_definition'] or info['init_docstring']:
            displayfields.append(("Constructor information", ""))
            if info['init_definition'] is not None:
                displayfields.append(("  Definition",
                                      info['init_definition'].rstrip()))
            if info['init_docstring'] is not None:
                displayfields.append(("  Docstring",
                                      indent(info['init_docstring'])))

    # Info for objects:
    else:
        add_fields(self.pinfo_fields_obj)

    # Finally send to printer/pager:
    if displayfields:
        page.page(self._format_fields(displayfields))
python
{ "resource": "" }
q280567
Inspector.psearch
test
def psearch(self, pattern, ns_table, ns_search=[],
            ignore_case=False, show_all=False):
    """Search namespaces with wildcards for objects.

    Arguments:

    - pattern: string containing shell-like wildcards to use in namespace
      searches and optionally a type specification to narrow the search to
      objects of that type.

    - ns_table: dict of name->namespaces for search.

    Optional arguments:

    - ns_search: list of namespace names to include in search.

    - ignore_case(False): make the search case-insensitive.

    - show_all(False): show all names, including those starting with
      underscores.
    """
    # print 'ps pattern:<%r>' % pattern  # dbg

    # defaults
    type_pattern = 'all'
    filter = ''

    cmds = pattern.split()
    len_cmds = len(cmds)
    if len_cmds == 1:
        # Only filter pattern given
        filter = cmds[0]
    elif len_cmds == 2:
        # Both filter and type specified
        filter, type_pattern = cmds
    else:
        raise ValueError('invalid argument string for psearch: <%s>' %
                         pattern)

    # filter search namespaces
    for name in ns_search:
        if name not in ns_table:
            raise ValueError('invalid namespace <%s>. Valid names: %s' %
                             (name, ns_table.keys()))

    # print 'type_pattern:',type_pattern  # dbg
    search_result, namespaces_seen = set(), set()
    for ns_name in ns_search:
        ns = ns_table[ns_name]
        # Normally, locals and globals are the same, so we just check one.
        if id(ns) in namespaces_seen:
            continue
        namespaces_seen.add(id(ns))
        tmp_res = list_namespace(ns, type_pattern, filter,
                                 ignore_case=ignore_case, show_all=show_all)
        search_result.update(tmp_res)

    page.page('\n'.join(sorted(search_result)))
python
{ "resource": "" }
q280568
threaded_reactor
test
def threaded_reactor():
    """
    Start the Twisted reactor in a separate thread, if not already done.
    Returns the reactor.
    The thread will automatically be destroyed when all the tests are done.
    """
    global _twisted_thread
    try:
        from twisted.internet import reactor
    except ImportError:
        return None, None
    if not _twisted_thread:
        from twisted.python import threadable
        from threading import Thread
        _twisted_thread = Thread(target=lambda: reactor.run(
            installSignalHandlers=False))
        _twisted_thread.setDaemon(True)
        _twisted_thread.start()
    return reactor, _twisted_thread
python
{ "resource": "" }
q280569
deferred
test
def deferred(timeout=None):
    """
    By wrapping a test function with this decorator, you can return a
    twisted Deferred and the test will wait for the deferred to be triggered.
    The whole test function will run inside the Twisted event loop.

    The optional timeout parameter specifies the maximum duration of the test.
    The difference with timed() is that timed() will still wait for the test
    to end, while deferred() will stop the test when its timeout has expired.
    The latter is more desirable when dealing with network tests, because
    the result may actually never arrive.

    If the callback is triggered, the test has passed.
    If the errback is triggered or the timeout expires, the test has failed.

    Example::

        @deferred(timeout=5.0)
        def test_resolve():
            return reactor.resolve("www.python.org")

    Attention! If you combine this decorator with other decorators (like
    "raises"), deferred() must be called *first*!

    In other words, this is good::

        @raises(DNSLookupError)
        @deferred()
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")

    and this is bad::

        @deferred()
        @raises(DNSLookupError)
        def test_error():
            return reactor.resolve("xxxjhjhj.biz")
    """
    reactor, reactor_thread = threaded_reactor()
    if reactor is None:
        raise ImportError("twisted is not available or could not be imported")
    # Check for common syntax mistake
    # (otherwise, tests can be silently ignored
    # if one writes "@deferred" instead of "@deferred()")
    try:
        timeout is None or timeout + 0
    except TypeError:
        raise TypeError("'timeout' argument must be a number or None")

    def decorate(func):
        def wrapper(*args, **kargs):
            q = Queue()

            def callback(value):
                q.put(None)

            def errback(failure):
                # Retrieve and save full exception info
                try:
                    failure.raiseException()
                except:
                    q.put(sys.exc_info())

            def g():
                try:
                    d = func(*args, **kargs)
                    try:
                        d.addCallbacks(callback, errback)
                    # Check for a common mistake and display a nice error
                    # message
                    except AttributeError:
                        raise TypeError("you must return a twisted Deferred "
                                        "from your test case!")
                # Catch exceptions raised in the test body (from the
                # Twisted thread)
                except:
                    q.put(sys.exc_info())

            reactor.callFromThread(g)
            try:
                error = q.get(timeout=timeout)
            except Empty:
                raise TimeExpired("timeout expired before end of test (%f s.)"
                                  % timeout)
            # Re-raise all exceptions
            if error is not None:
                exc_type, exc_value, tb = error
                raise exc_type, exc_value, tb
        wrapper = make_decorator(func)(wrapper)
        return wrapper
    return decorate
python
{ "resource": "" }
q280570
find_best_string
test
def find_best_string(query, corpus, step=4, flex=3, case_sensitive=False):
    """Return best matching substring of corpus.

    Parameters
    ----------
    query : str
    corpus : str
    step : int
        Step size of first match-value scan through corpus. Can be thought of
        as a sort of "scan resolution". Should not exceed length of query.
    flex : int
        Max. left/right substring position adjustment value. Should not
        exceed length of query / 2.

    Outputs
    -------
    output0 : str
        Best matching substring.
    output1 : float
        Match ratio of best matching substring. 1 is perfect match.
    """

    def ratio(a, b):
        """Compact alias for SequenceMatcher."""
        return SequenceMatcher(None, a, b).ratio()

    def scan_corpus(step):
        """Return list of match values from corpus-wide scan."""
        match_values = []
        m = 0
        while m + qlen - step <= len(corpus):
            match_values.append(ratio(query, corpus[m: m - 1 + qlen]))
            m += step
        return match_values

    def index_max(v):
        """Return index of max value."""
        return max(range(len(v)), key=v.__getitem__)

    def adjust_left_right_positions():
        """Return left/right positions for best string match."""
        # bp_* is synonym for 'Best Position Left/Right' and are adjusted
        # to optimize bmv_*
        p_l, bp_l = [pos] * 2
        p_r, bp_r = [pos + qlen] * 2

        # bmv_* are declared here in case they are untouched in optimization
        bmv_l = match_values[round_decimal(p_l / step)]
        bmv_r = match_values[round_decimal(p_r / step)]

        for f in range(flex):
            ll = ratio(query, corpus[p_l - f: p_r])
            if ll > bmv_l:
                bmv_l = ll
                bp_l = p_l - f

            lr = ratio(query, corpus[p_l + f: p_r])
            if lr > bmv_l:
                bmv_l = lr
                bp_l = p_l + f

            rl = ratio(query, corpus[p_l: p_r - f])
            if rl > bmv_r:
                bmv_r = rl
                bp_r = p_r - f

            rr = ratio(query, corpus[p_l: p_r + f])
            if rr > bmv_r:
                bmv_r = rr
                bp_r = p_r + f

        return bp_l, bp_r, ratio(query, corpus[bp_l: bp_r])

    if not case_sensitive:
        query = query.lower()
        corpus = corpus.lower()

    qlen = len(query)

    if flex >= qlen / 2:
        print("Warning: flex exceeds length of query / 2. Setting to default.")
        flex = 3

    match_values = scan_corpus(step)
    pos = index_max(match_values) * step

    pos_left, pos_right, match_value = adjust_left_right_positions()

    return corpus[pos_left: pos_right].strip(), match_value
python
{ "resource": "" }
q280571
XMLEncoder.to_string
test
def to_string(self, indent=True, declaration=True):
    """Encodes the stored ``data`` to XML and returns a ``string``.

    Setting ``indent`` to ``False`` will forego any pretty-printing and
    return a condensed value. Setting ``declaration`` to ``False`` will
    skip inserting the XML declaration.
    """
    return etree.tostring(self.to_xml(),
                          encoding=self.encoding,
                          xml_declaration=declaration,
                          pretty_print=indent)
python
{ "resource": "" }
q280572
XMLEncoder.to_xml
test
def to_xml(self):
    """Encodes the stored ``data`` to XML and returns an ``lxml.etree``
    value.
    """
    if self.data:
        self.document = self._update_document(self.document, self.data)
    return self.document
python
{ "resource": "" }
q280573
load_all_modules_in_packages
test
def load_all_modules_in_packages(package_or_set_of_packages):
    """
    Recursively loads all modules from a package object, or set of package objects

    :param package_or_set_of_packages: package object, or iterable of package objects
    :return: list of all unique modules discovered by the function
    """
    if isinstance(package_or_set_of_packages, types.ModuleType):
        packages = [package_or_set_of_packages]
    elif isinstance(package_or_set_of_packages, Iterable) and \
            not isinstance(package_or_set_of_packages, (dict, str)):
        packages = package_or_set_of_packages
    else:
        raise Exception("This function only accepts a module reference, "
                        "or an iterable of said objects")

    packages = list(packages)  # materialize once, so generators aren't exhausted
    imported = list(packages)  # copy: we append discovered modules below
    for package in packages:
        if not hasattr(package, '__path__'):
            raise Exception(
                'Package object passed in has no __path__ attribute. '
                'Make sure to pass in imported references to the packages in question.'
            )
        for module_finder, name, ispkg in pkgutil.walk_packages(package.__path__):
            module_name = '{}.{}'.format(package.__name__, name)
            current_module = importlib.import_module(module_name)
            imported.append(current_module)
            if ispkg:
                imported += load_all_modules_in_packages(current_module)

    for module in imported:
        # This is to cover cases where simply importing a module doesn't
        # execute all the code/definitions within. I don't totally understand
        # the reasons for this, but I do know enumerating a module's contents
        # (like with dir) seems to solve things.
        dir(module)

    return list(
        {module.__name__: module for module in imported}.values()
    )
python
{ "resource": "" }
q280574
Struct.__dict_invert
test
def __dict_invert(self, data):
    """Helper function for merge.

    Takes a dictionary whose values are lists and returns a dict with
    the elements of each list as keys and the original keys as values.
    """
    outdict = {}
    for k,lst in data.items():
        if isinstance(lst, str):
            lst = lst.split()
        for entry in lst:
            outdict[entry] = k
    return outdict
python
{ "resource": "" }
q280575
Struct.merge
test
def merge(self, __loc_data__=None, __conflict_solve=None, **kw):
    """Merge two Structs with customizable conflict resolution.

    This is similar to :meth:`update`, but much more flexible. First, a
    dict is made from data+key=value pairs. When merging this dict with
    the Struct S, the optional dictionary 'conflict' is used to decide
    what to do.

    If conflict is not given, the default behavior is to preserve any keys
    with their current value (the opposite of the :meth:`update` method's
    behavior).

    Parameters
    ----------
    __loc_data__ : dict, Struct
        The data to merge into self
    __conflict_solve : dict
        The conflict policy dict.  The keys are binary functions used to
        resolve the conflict and the values are lists of strings naming
        the keys the conflict resolution function applies to.  Instead of
        a list of strings a space separated string can be used, like
        'a b c'.
    kw : dict
        Additional key, value pairs to merge in

    Notes
    -----
    The `__conflict_solve` dict is a dictionary of binary functions which
    will be used to solve key conflicts.  Here is an example::

        __conflict_solve = dict(
            func1=['a','b','c'],
            func2=['d','e']
        )

    In this case, the function :func:`func1` will be used to resolve
    keys 'a', 'b' and 'c' and the function :func:`func2` will be used for
    keys 'd' and 'e'.  This could also be written as::

        __conflict_solve = dict(func1='a b c',func2='d e')

    These functions will be called for each key they apply to with the
    form::

        func1(self['a'], other['a'])

    The return value is used as the final merged value.

    As a convenience, merge() provides five (the most commonly needed)
    pre-defined policies: preserve, update, add, add_flip and add_s. The
    easiest explanation is their implementation::

        preserve = lambda old,new: old
        update   = lambda old,new: new
        add      = lambda old,new: old + new
        add_flip = lambda old,new: new + old  # note change of order!
        add_s    = lambda old,new: old + ' ' + new  # only for str!

    You can use those five words (as strings) as keys instead of defining
    them as functions, and the merge method will substitute the appropriate
    functions for you.

    For more complicated conflict resolution policies, you still need to
    construct your own functions.

    Examples
    --------

    This shows the default policy:

    >>> s = Struct(a=10,b=30)
    >>> s2 = Struct(a=20,c=40)
    >>> s.merge(s2)
    >>> sorted(s.items())
    [('a', 10), ('b', 30), ('c', 40)]

    Now, show how to specify a conflict dict:

    >>> s = Struct(a=10,b=30)
    >>> s2 = Struct(a=20,b=40)
    >>> conflict = {'update':'a','add':'b'}
    >>> s.merge(s2,conflict)
    >>> sorted(s.items())
    [('a', 20), ('b', 70)]
    """

    data_dict = dict(__loc_data__,**kw)

    # policies for conflict resolution: two argument functions which return
    # the value that will go in the new struct
    preserve = lambda old,new: old
    update   = lambda old,new: new
    add      = lambda old,new: old + new
    add_flip = lambda old,new: new + old  # note change of order!
    add_s    = lambda old,new: old + ' ' + new

    # default policy is to keep current keys when there's a conflict
    conflict_solve = list2dict2(self.keys(), default = preserve)

    # the conflict_solve dictionary is given by the user 'inverted': we
    # need a name-function mapping, it comes as a function -> names
    # dict.  Make a local copy (b/c we'll make changes), replace user
    # strings for the builtin policies and invert it.
    if __conflict_solve:
        inv_conflict_solve_user = __conflict_solve.copy()
        for name, func in [('preserve',preserve), ('update',update),
                           ('add',add), ('add_flip',add_flip),
                           ('add_s',add_s)]:
            if name in inv_conflict_solve_user.keys():
                inv_conflict_solve_user[func] = inv_conflict_solve_user[name]
                del inv_conflict_solve_user[name]
        conflict_solve.update(self.__dict_invert(inv_conflict_solve_user))
    for key in data_dict:
        if key not in self:
            self[key] = data_dict[key]
        else:
            self[key] = conflict_solve[key](self[key],data_dict[key])
python
{ "resource": "" }
q280576
object_to_primitive
test
def object_to_primitive(obj):
    '''
    Convert an object to primitive types so it can be serialized to a data
    format such as JSON.

    All primitive types: dict, list, int, float, bool, str, None
    '''
    if obj is None:
        return obj
    if isinstance(obj, (int, float, bool, str)):
        return obj
    # tuple added alongside list/set: vars() would fail on a bare tuple
    if isinstance(obj, (list, tuple, frozenset, set)):
        return [object_to_primitive(x) for x in obj]
    if isinstance(obj, dict):
        return dict([(object_to_primitive(k), object_to_primitive(v))
                     for k, v in obj.items()])
    data = vars(obj)
    assert isinstance(data, dict)
    return object_to_primitive(data)
python
{ "resource": "" }
q280577
Parser.format2
test
def format2(self, raw, out = None, scheme = ''):
    """ Parse and send the colored source.

    If out and scheme are not specified, the defaults (given to
    constructor) are used.

    out should be a file-type object. Optionally, out can be given as the
    string 'str' and the parser will automatically return the output in a
    string."""

    string_output = 0
    if out == 'str' or self.out == 'str' or \
       isinstance(self.out,StringIO.StringIO):
        # XXX - I don't really like this state handling logic, but at this
        # point I don't want to make major changes, so adding the
        # isinstance() check is the simplest I can do to ensure correct
        # behavior.
        out_old = self.out
        self.out = StringIO.StringIO()
        string_output = 1
    elif out is not None:
        self.out = out

    # Fast return of the unmodified input for NoColor scheme
    if scheme == 'NoColor':
        error = False
        self.out.write(raw)
        if string_output:
            return raw,error
        else:
            return None,error

    # local shorthands
    colors = self.color_table[scheme].colors
    self.colors = colors # put in object so __call__ sees it

    # Remove trailing whitespace and normalize tabs
    self.raw = raw.expandtabs().rstrip()

    # store line offsets in self.lines
    self.lines = [0, 0]
    pos = 0
    raw_find = self.raw.find
    lines_append = self.lines.append
    while 1:
        pos = raw_find('\n', pos) + 1
        if not pos: break
        lines_append(pos)
    lines_append(len(self.raw))

    # parse the source and write it
    self.pos = 0
    text = StringIO.StringIO(self.raw)

    error = False
    try:
        for atoken in generate_tokens(text.readline):
            self(*atoken)
    except tokenize.TokenError as ex:
        msg = ex.args[0]
        line = ex.args[1][0]
        self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
                       (colors[token.ERRORTOKEN],
                        msg, self.raw[self.lines[line]:],
                        colors.normal)
                       )
        error = True
    self.out.write(colors.normal+'\n')
    if string_output:
        output = self.out.getvalue()
        self.out = out_old
        return (output, error)
    return (None, error)
python
{ "resource": "" }
q280578
getfigs
test
def getfigs(*fig_nums):
    """Get a list of matplotlib figures by figure numbers.

    If no arguments are given, all available figures are returned.  If the
    argument list contains references to invalid figures, a warning is printed
    but the function continues pasting further figures.

    Parameters
    ----------
    fig_nums : tuple
        A tuple of ints giving the figure numbers of the figures to return.
    """
    from matplotlib._pylab_helpers import Gcf
    if not fig_nums:
        fig_managers = Gcf.get_all_fig_managers()
        return [fm.canvas.figure for fm in fig_managers]
    else:
        figs = []
        for num in fig_nums:
            f = Gcf.figs.get(num)
            if f is None:
                print('Warning: figure %s not available.' % num)
            else:
                figs.append(f.canvas.figure)
        return figs
python
{ "resource": "" }
q280579
print_figure
test
def print_figure(fig, fmt='png'):
    """Convert a figure to svg or png for inline display."""
    # When there's an empty figure, we shouldn't return anything, otherwise we
    # get big blank areas in the qt console.
    if not fig.axes and not fig.lines:
        return

    fc = fig.get_facecolor()
    ec = fig.get_edgecolor()
    fig.set_facecolor('white')
    fig.set_edgecolor('white')
    try:
        bytes_io = BytesIO()
        fig.canvas.print_figure(bytes_io, format=fmt, bbox_inches='tight')
        data = bytes_io.getvalue()
    finally:
        fig.set_facecolor(fc)
        fig.set_edgecolor(ec)
    return data
python
{ "resource": "" }
q280580
mpl_runner
test
def mpl_runner(safe_execfile):
    """Factory to return a matplotlib-enabled runner for %run.

    Parameters
    ----------
    safe_execfile : function
        This must be a function with the same interface as the
        :meth:`safe_execfile` method of IPython.

    Returns
    -------
    A function suitable for use as the ``runner`` argument of the %run magic
    function.
    """

    def mpl_execfile(fname,*where,**kw):
        """matplotlib-aware wrapper around safe_execfile.

        Its interface is identical to that of the :func:`execfile` builtin.

        This is ultimately a call to execfile(), but wrapped in safeties to
        properly handle interactive rendering."""

        import matplotlib
        import matplotlib.pylab as pylab

        #print '*** Matplotlib runner ***' # dbg
        # turn off rendering until end of script
        is_interactive = matplotlib.rcParams['interactive']
        matplotlib.interactive(False)
        safe_execfile(fname,*where,**kw)
        matplotlib.interactive(is_interactive)
        # make rendering call now, if the user tried to do it
        if pylab.draw_if_interactive.called:
            pylab.draw()
            pylab.draw_if_interactive.called = False

    return mpl_execfile
python
{ "resource": "" }
q280581
select_figure_format
test
def select_figure_format(shell, fmt):
    """Select figure format for inline backend, either 'png' or 'svg'.

    Using this method ensures only one figure format is active at a time.
    """
    from matplotlib.figure import Figure
    from IPython.zmq.pylab import backend_inline

    svg_formatter = shell.display_formatter.formatters['image/svg+xml']
    png_formatter = shell.display_formatter.formatters['image/png']

    if fmt=='png':
        svg_formatter.type_printers.pop(Figure, None)
        png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png'))
    elif fmt=='svg':
        png_formatter.type_printers.pop(Figure, None)
        svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg'))
    else:
        raise ValueError("supported formats are: 'png', 'svg', not %r"%fmt)

    # set the format to be used in the backend()
    backend_inline._figure_format = fmt
python
{ "resource": "" }
q280582
find_gui_and_backend
test
def find_gui_and_backend(gui=None):
    """Given a gui string return the gui and mpl backend.

    Parameters
    ----------
    gui : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').

    Returns
    -------
    A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
    'WXAgg','Qt4Agg','module://IPython.zmq.pylab.backend_inline').
    """

    import matplotlib

    if gui and gui != 'auto':
        # select backend based on requested gui
        backend = backends[gui]
    else:
        backend = matplotlib.rcParams['backend']
        # In this case, we need to find what the appropriate gui selection call
        # should be for IPython, so we can activate inputhook accordingly
        gui = backend2gui.get(backend, None)
    return gui, backend
python
{ "resource": "" }
q280583
activate_matplotlib
test
def activate_matplotlib(backend):
    """Activate the given backend and set interactive to True."""

    import matplotlib
    if backend.startswith('module://'):
        # Work around bug in matplotlib: matplotlib.use converts the
        # backend_id to lowercase even if a module name is specified!
        matplotlib.rcParams['backend'] = backend
    else:
        matplotlib.use(backend)
    matplotlib.interactive(True)

    # This must be imported last in the matplotlib series, after
    # backend/interactivity choices have been made
    import matplotlib.pylab as pylab

    # XXX For now leave this commented out, but depending on discussions with
    # mpl-dev, we may be able to allow interactive switching...
    #import matplotlib.pyplot
    #matplotlib.pyplot.switch_backend(backend)

    pylab.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # For this, we wrap it into a decorator which adds a 'called' flag.
    pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
python
{ "resource": "" }
q280584
configure_inline_support
test
def configure_inline_support(shell, backend, user_ns=None):
    """Configure an IPython shell object for matplotlib use.

    Parameters
    ----------
    shell : InteractiveShell instance

    backend : matplotlib backend

    user_ns : dict
        A namespace where all configured variables will be placed.  If not
        given, the `user_ns` attribute of the shell object is used.
    """
    # If using our svg payload backend, register the post-execution
    # function that will pick up the results for display.  This can only be
    # done with access to the real shell object.

    # Note: if we can't load the inline backend, then there's no point
    # continuing (such as in terminal-only shells in environments without
    # zeromq available).
    try:
        from IPython.zmq.pylab.backend_inline import InlineBackend
    except ImportError:
        return

    user_ns = shell.user_ns if user_ns is None else user_ns

    cfg = InlineBackend.instance(config=shell.config)
    cfg.shell = shell
    if cfg not in shell.configurables:
        shell.configurables.append(cfg)

    if backend == backends['inline']:
        from IPython.zmq.pylab.backend_inline import flush_figures
        from matplotlib import pyplot
        shell.register_post_execute(flush_figures)
        # load inline_rc
        pyplot.rcParams.update(cfg.rc)
        # Add 'figsize' to pyplot and to the user's namespace
        user_ns['figsize'] = pyplot.figsize = figsize

    # Setup the default figure format
    fmt = cfg.figure_format
    select_figure_format(shell, fmt)

    # The old pastefig function has been replaced by display
    from IPython.core.display import display
    # Add display and getfigs to the user's namespace
    user_ns['display'] = display
    user_ns['getfigs'] = getfigs
python
{ "resource": "" }
q280585
pylab_activate
test
def pylab_activate(user_ns, gui=None, import_all=True, shell=None):
    """Activate pylab mode in the user's namespace.

    Loads and initializes numpy, matplotlib and friends for interactive use.

    Parameters
    ----------
    user_ns : dict
        Namespace where the imports will occur.

    gui : optional, string
        A valid gui name following the conventions of the %gui magic.

    import_all : optional, boolean
        If true, an 'import *' is done from numpy and pylab.

    Returns
    -------
    The actual gui used (if not given as input, it was obtained from
    matplotlib itself); it will be needed next to configure IPython's gui
    integration.
    """
    gui, backend = find_gui_and_backend(gui)
    activate_matplotlib(backend)
    import_pylab(user_ns, import_all)
    if shell is not None:
        configure_inline_support(shell, backend, user_ns)

    print """
Welcome to pylab, a matplotlib-based Python environment [backend: %s].
For more information, type 'help(pylab)'.""" % backend
    # flush stdout, just to be safe
    sys.stdout.flush()

    return gui
python
{ "resource": "" }
q280586
PyTracer._trace
test
def _trace(self, frame, event, arg_unused):
    """The trace function passed to sys.settrace."""

    if self.stopped:
        return

    if 0:
        sys.stderr.write("trace event: %s %r @%d\n" % (
            event, frame.f_code.co_filename, frame.f_lineno
        ))

    if self.last_exc_back:
        if frame == self.last_exc_back:
            # Someone forgot a return event.
            if self.arcs and self.cur_file_data:
                pair = (self.last_line, -self.last_exc_firstlineno)
                self.cur_file_data[pair] = None
            self.cur_file_data, self.last_line = self.data_stack.pop()
        self.last_exc_back = None

    if event == 'call':
        # Entering a new function context.  Decide if we should trace
        # in this file.
        self.data_stack.append((self.cur_file_data, self.last_line))
        filename = frame.f_code.co_filename
        if filename not in self.should_trace_cache:
            tracename = self.should_trace(filename, frame)
            self.should_trace_cache[filename] = tracename
        else:
            tracename = self.should_trace_cache[filename]
        #print("called, stack is %d deep, tracename is %r" % (
        #       len(self.data_stack), tracename))
        if tracename:
            if tracename not in self.data:
                self.data[tracename] = {}
            self.cur_file_data = self.data[tracename]
        else:
            self.cur_file_data = None
        # Set the last_line to -1 because the next arc will be entering a
        # code block, indicated by (-1, n).
        self.last_line = -1
    elif event == 'line':
        # Record an executed line.
        if self.cur_file_data is not None:
            if self.arcs:
                #print("lin", self.last_line, frame.f_lineno)
                self.cur_file_data[(self.last_line, frame.f_lineno)] = None
            else:
                #print("lin", frame.f_lineno)
                self.cur_file_data[frame.f_lineno] = None
        self.last_line = frame.f_lineno
    elif event == 'return':
        if self.arcs and self.cur_file_data:
            first = frame.f_code.co_firstlineno
            self.cur_file_data[(self.last_line, -first)] = None
        # Leaving this function, pop the filename stack.
        self.cur_file_data, self.last_line = self.data_stack.pop()
        #print("returned, stack is %d deep" % (len(self.data_stack)))
    elif event == 'exception':
        #print("exc", self.last_line, frame.f_lineno)
        self.last_exc_back = frame.f_back
        self.last_exc_firstlineno = frame.f_code.co_firstlineno
    return self._trace
python
{ "resource": "" }
q280587
PyTracer.start
test
def start(self):
    """Start this Tracer.

    Return a Python function suitable for use with sys.settrace().

    """
    self.thread = threading.currentThread()
    sys.settrace(self._trace)
    return self._trace
python
{ "resource": "" }
q280588
PyTracer.stop
test
def stop(self):
    """Stop this Tracer."""
    self.stopped = True
    if self.thread != threading.currentThread():
        # Called on a different thread than started us: we can't unhook
        # ourselves, but we've set the flag that we should stop, so we
        # won't do any more tracing.
        return

    if hasattr(sys, "gettrace") and self.warn:
        if sys.gettrace() != self._trace:
            msg = "Trace function changed, measurement is likely wrong: %r"
            self.warn(msg % (sys.gettrace(),))
    #print("Stopping tracer on %s" % threading.current_thread().ident)
    sys.settrace(None)
python
{ "resource": "" }
q280589
Collector._start_tracer
test
def _start_tracer(self):
    """Start a new Tracer object, and store it in self.tracers."""
    tracer = self._trace_class()
    tracer.data = self.data
    tracer.arcs = self.branch
    tracer.should_trace = self.should_trace
    tracer.should_trace_cache = self.should_trace_cache
    tracer.warn = self.warn
    fn = tracer.start()
    self.tracers.append(tracer)
    return fn
python
{ "resource": "" }
q280590
Collector._installation_trace
test
def _installation_trace(self, frame_unused, event_unused, arg_unused):
    """Called on new threads, installs the real tracer."""
    # Remove ourselves as the trace function
    sys.settrace(None)
    # Install the real tracer.
    fn = self._start_tracer()
    # Invoke the real trace function with the current event, to be sure
    # not to lose an event.
    if fn:
        fn = fn(frame_unused, event_unused, arg_unused)
    # Return the new trace function to continue tracing in this scope.
    return fn
python
{ "resource": "" }
q280591
Collector.start
test
def start(self):
    """Start collecting trace information."""
    if self._collectors:
        self._collectors[-1].pause()
    self._collectors.append(self)
    #print("Started: %r" % self._collectors, file=sys.stderr)

    # Check to see whether we had a fullcoverage tracer installed.
    traces0 = []
    if hasattr(sys, "gettrace"):
        fn0 = sys.gettrace()
        if fn0:
            tracer0 = getattr(fn0, '__self__', None)
            if tracer0:
                traces0 = getattr(tracer0, 'traces', [])

    # Install the tracer on this thread.
    fn = self._start_tracer()

    for args in traces0:
        (frame, event, arg), lineno = args
        try:
            fn(frame, event, arg, lineno=lineno)
        except TypeError:
            raise Exception(
                "fullcoverage must be run with the C trace function."
            )

    # Install our installation tracer in threading, to jump start other
    # threads.
    threading.settrace(self._installation_trace)
python
{ "resource": "" }
q280592
Collector.stop
test
def stop(self):
    """Stop collecting trace information."""
    #print >>sys.stderr, "Stopping: %r" % self._collectors
    assert self._collectors
    assert self._collectors[-1] is self

    self.pause()
    self.tracers = []

    # Remove this Collector from the stack, and resume the one underneath
    # (if any).
    self._collectors.pop()
    if self._collectors:
        self._collectors[-1].resume()
python
{ "resource": "" }
q280593
Collector.pause
test
def pause(self):
    """Pause tracing, but be prepared to `resume`."""
    for tracer in self.tracers:
        tracer.stop()
        stats = tracer.get_stats()
        if stats:
            print("\nCoverage.py tracer stats:")
            for k in sorted(stats.keys()):
                print("%16s: %s" % (k, stats[k]))
    threading.settrace(None)
python
{ "resource": "" }
q280594
Collector.resume
test
def resume(self):
    """Resume tracing after a `pause`."""
    for tracer in self.tracers:
        tracer.start()
    threading.settrace(self._installation_trace)
python
{ "resource": "" }
q280595
Collector.get_line_data
test
def get_line_data(self):
    """Return the line data collected.

    Data is { filename: { lineno: None, ...}, ...}
    """
    if self.branch:
        # If we were measuring branches, then we have to re-build the dict
        # to show line data.
        line_data = {}
        for f, arcs in self.data.items():
            line_data[f] = ldf = {}
            for l1, _ in list(arcs.keys()):
                if l1:
                    ldf[l1] = None
        return line_data
    else:
        return self.data
python
{ "resource": "" }
q280596
collect_exceptions
test
def collect_exceptions(rdict_or_list, method='unspecified'):
    """check a result dict for errors, and raise CompositeError if any exist.
    Passthrough otherwise."""
    elist = []
    if isinstance(rdict_or_list, dict):
        rlist = rdict_or_list.values()
    else:
        rlist = rdict_or_list
    for r in rlist:
        if isinstance(r, RemoteError):
            en, ev, etb, ei = r.ename, r.evalue, r.traceback, r.engine_info
            # Sometimes we could have CompositeError in our list.  Just take
            # the errors out of them and put them in our new list.  This
            # has the effect of flattening lists of CompositeErrors into one
            # CompositeError
            if en=='CompositeError':
                for e in ev.elist:
                    elist.append(e)
            else:
                elist.append((en, ev, etb, ei))
    if len(elist)==0:
        return rdict_or_list
    else:
        msg = "one or more exceptions from call to method: %s" % (method)
        # This silliness is needed so the debugger has access to the exception
        # instance (e in this case)
        try:
            raise CompositeError(msg, elist)
        except CompositeError as e:
            raise e
python
{ "resource": "" }
q280597
CompositeError.render_traceback
test
def render_traceback(self, excid=None):
    """render one or all of my tracebacks to a list of lines"""
    lines = []
    if excid is None:
        for (en,ev,etb,ei) in self.elist:
            lines.append(self._get_engine_str(ei))
            lines.extend((etb or 'No traceback available').splitlines())
            lines.append('')
    else:
        try:
            en,ev,etb,ei = self.elist[excid]
        except:
            raise IndexError("an exception with index %i does not exist"%excid)
        else:
            lines.append(self._get_engine_str(ei))
            lines.extend((etb or 'No traceback available').splitlines())
    return lines
python
{ "resource": "" }
q280598
process_startup
test
def process_startup():
    """Call this at Python startup to perhaps measure coverage.

    If the environment variable COVERAGE_PROCESS_START is defined, coverage
    measurement is started.  The value of the variable is the config file
    to use.

    There are two ways to configure your Python installation to invoke this
    function when Python starts:

    #. Create or append to sitecustomize.py to add these lines::

        import coverage
        coverage.process_startup()

    #. Create a .pth file in your Python installation containing::

        import coverage; coverage.process_startup()

    """
    cps = os.environ.get("COVERAGE_PROCESS_START")
    if cps:
        cov = coverage(config_file=cps, auto_data=True)
        cov.start()
        cov._warn_no_data = False
        cov._warn_unimported_source = False
python
{ "resource": "" }
q280599
coverage._canonical_dir
test
def _canonical_dir(self, morf):
    """Return the canonical directory of the module or file `morf`."""
    return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
python
{ "resource": "" }