code | docstring | _id |
---|---|---|
@dev.command() <NEW_LINE> @click.pass_context <NEW_LINE> @click.argument("input", nargs=1, required=True, callback=alias_checker) <NEW_LINE> @click.argument("software", nargs=1, required=False, callback=alias_checker) <NEW_LINE> @click.argument("fp_or_searchkey", nargs=1, required=False, callback=alias_checker) <NEW_LINE> def keybindings(ctx, input, software, fp_or_searchkey): <NEW_LINE> <INDENT> input, software, fp_or_searchkey = get_arguments(ctx, 3) <NEW_LINE> _input = str(input) <NEW_LINE> _software = str(software) <NEW_LINE> _fp_or_searchkey = str(fp_or_searchkey) <NEW_LINE> create_folder(KEYBINDINGS_CONFIG_FOLDER_PATH) <NEW_LINE> check_sub_command_keybindings(_input, _software, _fp_or_searchkey) | This command can be used to save or search keybindings for different software.
yoda dev keybindings INPUT[add,search] SOFTWARE_NAME[default: None] FILE_TO_ADD_OR_ACTION_TO_SEARCH[default:None] | 625941b12c8b7c6e89b35542 |
def get_search(): <NEW_LINE> <INDENT> return Search(client) | Returns a search object that allows performing queries. | 625941b1d164cc6175782ac4 |
def testClosesAutoset(self): <NEW_LINE> <INDENT> s = Survey.objects.create(title=self.t, opens=self.sd) <NEW_LINE> self.assertEqual(s.closes, datetime.date(2010, 1, 4), "closes not autoset to 7 days after opens, expected %s but got %s" % (datetime.date(2010, 1, 4), s.closes)) | Tests for the Survey override method | 625941b13539df3088e2e0c2 |
@checkpoint_controller.route('/list-checkpoints/add-checkpoint', methods=['GET', 'POST']) <NEW_LINE> @login_required <NEW_LINE> @required_roles('Mentor') <NEW_LINE> def add_checkpoint(): <NEW_LINE> <INDENT> user = user_session(session['user'], session['type']) <NEW_LINE> if isinstance(user, Mentor): <NEW_LINE> <INDENT> if request.method == 'POST': <NEW_LINE> <INDENT> if request.form['add_checkpoint']: <NEW_LINE> <INDENT> students = Student.list_students() <NEW_LINE> checkpoint_name = request.form['checkpoint_name'] <NEW_LINE> date = request.form['date'] <NEW_LINE> Checkpoint.add_checkpoint_students(checkpoint_name, date, user_session(session['user'], session[ 'type']).id, students) <NEW_LINE> flash('Checkpoint was added') <NEW_LINE> return redirect(url_for('checkpoint_controller.list_checkpoints')) <NEW_LINE> <DEDENT> <DEDENT> return render_template('add_checkpoint.html', user=user_session(session['user'], session['type'])) <NEW_LINE> <DEDENT> return render_template('404.html', user=user_session(session['user'], session['type'])) | GET: returns the add-assistant form
POST: returns the list of assistants with the new assistant added | 625941b132920d7e50b27f4b |
def _make_request(token, method_name, method='get', params=None, files=None, base_url=API_URL): <NEW_LINE> <INDENT> request_url = base_url.format(token, method_name) <NEW_LINE> logger.debug("Request: method={0} url={1} params={2} files={3}".format(method, request_url, params, files)) <NEW_LINE> read_timeout = READ_TIMEOUT <NEW_LINE> connect_timeout = CONNECT_TIMEOUT <NEW_LINE> if files and format_header_param: <NEW_LINE> <INDENT> fields.format_header_param = _no_encode(format_header_param) <NEW_LINE> <DEDENT> if params: <NEW_LINE> <INDENT> if 'timeout' in params: read_timeout = params['timeout'] + 10 <NEW_LINE> if 'connect-timeout' in params: connect_timeout = params['connect-timeout'] + 10 <NEW_LINE> <DEDENT> result = _get_req_session().request(method, request_url, params=params, files=files, timeout=(connect_timeout, read_timeout), proxies=proxy) <NEW_LINE> logger.debug("The server returned: '{0}'".format(result.text.encode('utf8'))) <NEW_LINE> return _check_result(method_name, result)['result'] | Makes a request to the Telegram API.
:param token: The bot's API token. (Created with @BotFather)
:param method_name: Name of the API method to be called. (E.g. 'getUpdates')
:param method: HTTP method to be used. Defaults to 'get'.
:param params: Optional parameters. Should be a dictionary with key-value pairs.
:param files: Optional files.
:return: The result parsed to a JSON dictionary. | 625941b1e5267d203edcda20 |
def dot_product(self, o): <NEW_LINE> <INDENT> return self.x * o.x + self.y * o.y | Returns the dot product of vectors self and o | 625941b1d7e4931a7ee9dc9b |
def _domain_event_tray_change_cb(conn, domain, dev, reason, opaque): <NEW_LINE> <INDENT> _salt_send_domain_event( opaque, conn, domain, opaque["event"], { "dev": dev, "reason": _get_libvirt_enum_string("VIR_DOMAIN_EVENT_TRAY_CHANGE_", reason), }, ) | Domain tray change events handler | 625941b182261d6c526ab21b |
def __add__(self, other): <NEW_LINE> <INDENT> newRect = RECT() <NEW_LINE> newRect.left = self.left + other.left <NEW_LINE> newRect.right = self.right + other.left <NEW_LINE> newRect.top = self.top + other.top <NEW_LINE> newRect.bottom = self.bottom + other.top <NEW_LINE> return newRect | Allow two rects to be added using + | 625941b1e8904600ed9f1ca1 |
def build_menu(self): <NEW_LINE> <INDENT> self.menu = Menu(self) | Create a new instance of the menu widget | 625941b1be7bc26dc91cd385 |
def get_deposit_spend_secret_hash(script_hex): <NEW_LINE> <INDENT> validate_deposit_script(script_hex) <NEW_LINE> opcode, data, disassembled = get_word(h2b(script_hex), 9) <NEW_LINE> return b2h(data) | Return spend secret hash for given deposit script. | 625941b1d486a94d0b98dec8 |
def fail(self, text="FAIL"): <NEW_LINE> <INDENT> _text = text if text else "FAIL" <NEW_LINE> self._freeze(_text) | Set fail finalizer to a spinner. | 625941b16aa9bd52df036b19 |
def format_error(msg) -> str: <NEW_LINE> <INDENT> return f":x: **{msg}**" | Command error format for user. | 625941b13317a56b869399e4 |
def run(self): <NEW_LINE> <INDENT> suspended = False <NEW_LINE> if self._dbus_interface is None: <NEW_LINE> <INDENT> time.sleep(2) <NEW_LINE> self.load_dbus() <NEW_LINE> <DEDENT> while not self._shutdown: <NEW_LINE> <INDENT> if self._active: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self._dbus_interface is None: <NEW_LINE> <INDENT> self.load_dbus() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> screensaver_active = self._dbus_interface.GetActive() <NEW_LINE> if screensaver_active: <NEW_LINE> <INDENT> if not suspended: <NEW_LINE> <INDENT> suspended = True <NEW_LINE> self.logger.info("Suspend screensaver") <NEW_LINE> self._parent.suspend_devices() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if suspended: <NEW_LINE> <INDENT> suspended = False <NEW_LINE> self.logger.info("Resume screensaver") <NEW_LINE> self._parent.resume_devices() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> except Exception as err: <NEW_LINE> <INDENT> self.logger.exception("Caught exception in run loop", exc_info=err) <NEW_LINE> self._dbus_interface = None <NEW_LINE> <DEDENT> <DEDENT> time.sleep(0.1) <NEW_LINE> <DEDENT> self.logger.info("Screensaver Thread finished") | Thread run loop | 625941b185dfad0860c3abd8 |
def fit(self,X,y): <NEW_LINE> <INDENT> weight = np.ones(X.shape[0]) / float(X.shape[0]) <NEW_LINE> for i in range(self.n_weakers_limit): <NEW_LINE> <INDENT> clf = DecisionTreeClassifier(max_depth=5) <NEW_LINE> self.weak_classifier_sets.append(clf.fit(X,y,sample_weight=weight)) <NEW_LINE> y_calcu = self.weak_classifier_sets[i].predict(X) <NEW_LINE> error = 1.0 - np.sum(np.maximum(0, y_calcu*y)) / float(y.shape[0]) <NEW_LINE> self.alpha[i] = 0.5 * math.log( (1-error) / error ) <NEW_LINE> weight = weight * np.exp(-y_calcu*y * self.alpha[i]) <NEW_LINE> weight = weight * 1.0/np.sum(weight) | Build a boosted classifier from the training set (X, y).
Args:
X: An ndarray indicating the samples to be trained, which shape should be (n_samples,n_features).
y: An ndarray indicating the ground-truth labels correspond to X, which shape should be (n_samples,1). | 625941b173bcbd0ca4b2bdf5 |
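A worked sketch of the weight update this loop performs, on hypothetical toy data with ±1 labels. Note it uses the standard weighted error, while the row above estimates the error without sample weights, so the numbers differ slightly:

```python
import numpy as np

# Toy data: one weak learner's predictions vs. ground truth, labels in {-1, +1}.
y_true = np.array([1, 1, -1, -1, 1])
y_pred = np.array([1, -1, -1, -1, 1])  # one mistake out of five

weight = np.ones(len(y_true)) / len(y_true)

# Weighted error and learner weight alpha = 0.5 * log((1 - err) / err).
error = np.sum(weight * (y_pred != y_true))  # 0.2
alpha = 0.5 * np.log((1 - error) / error)    # ~0.693

# Misclassified samples (y_pred * y_true == -1) gain weight, then renormalize.
weight *= np.exp(-alpha * y_pred * y_true)
weight /= weight.sum()
print(weight)  # the single misclassified sample now carries half the total weight
```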
def test_host_is_a_url(self): <NEW_LINE> <INDENT> with self.assertRaises(SystemExit) as cm: <NEW_LINE> <INDENT> APIDownload(credentials={}, host='www.1') <NEW_LINE> <DEDENT> error_msg = 'The variable host is not a valid URL e.g: "http://apidomain.com"' <NEW_LINE> self.assertEqual(cm.exception.args[0].msg, error_msg) | The host must be a URL | 625941b131939e2706e4cbf2 |
def openDb(connDetails:str): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> cnxn = pyodbc.connect(co.deobfuscate(connDetails)) <NEW_LINE> <DEDENT> except Exception as ex: <NEW_LINE> <INDENT> logging.error("Cannot connect to the Database - please check database connection details.") <NEW_LINE> logging.error(ex) <NEW_LINE> sys.exit(-1) <NEW_LINE> <DEDENT> return cnxn | This function opens the connection to the Database
Input:
- connDetails: connection string for pyodbc, opened in a safe way, i.e. with a try/except mechanism
Output:
- cnxn: the connection object returned by pyodbc.connect | 625941b18e71fb1e9831d52e |
def __init__(self): <NEW_LINE> <INDENT> self.context = None <NEW_LINE> self.daemonize = 0 <NEW_LINE> self.pmconfig = pmconfig.pmConfig(self) <NEW_LINE> self.opts = self.options() <NEW_LINE> self.keys = ('source', 'output', 'derived', 'header', 'globals', 'samples', 'interval', 'type', 'precision', 'daemonize', 'influx_server', 'influx_db', 'influx_user', 'influx_pass', 'influx_tags', 'count_scale', 'space_scale', 'time_scale', 'version', 'speclocal', 'instances', 'ignore_incompat', 'omit_flat') <NEW_LINE> self.check = 0 <NEW_LINE> self.version = CONFVER <NEW_LINE> self.source = "local:" <NEW_LINE> self.output = None <NEW_LINE> self.speclocal = None <NEW_LINE> self.derived = None <NEW_LINE> self.header = 1 <NEW_LINE> self.globals = 1 <NEW_LINE> self.samples = None <NEW_LINE> self.interval = pmapi.timeval(60) <NEW_LINE> self.opts.pmSetOptionInterval(str(60)) <NEW_LINE> self.delay = 0 <NEW_LINE> self.type = 0 <NEW_LINE> self.ignore_incompat = 0 <NEW_LINE> self.instances = [] <NEW_LINE> self.omit_flat = 0 <NEW_LINE> self.precision = 3 <NEW_LINE> self.timefmt = "%H:%M:%S" <NEW_LINE> self.interpol = 0 <NEW_LINE> self.count_scale = None <NEW_LINE> self.space_scale = None <NEW_LINE> self.time_scale = None <NEW_LINE> self.influx_server = SERVER <NEW_LINE> self.influx_db = DB <NEW_LINE> self.influx_user = None <NEW_LINE> self.influx_pass = None <NEW_LINE> self.influx_tags = "" <NEW_LINE> self.runtime = -1 <NEW_LINE> self.metrics = OrderedDict() <NEW_LINE> self.pmfg = None <NEW_LINE> self.pmfg_ts = None <NEW_LINE> self.config = self.pmconfig.set_config_file(DEFAULT_CONFIG) <NEW_LINE> self.pmconfig.read_options() <NEW_LINE> self.pmconfig.read_cmd_line() <NEW_LINE> self.pmconfig.prepare_metrics() <NEW_LINE> self.pmconfig.set_signal_handler() | Construct object, prepare for command line handling | 625941b16fece00bbac2d4b3 |
@commands.command("alarm-volume") <NEW_LINE> @click.argument("host", required=1) <NEW_LINE> @click.option("--mode", "-M", type=str, required=1, help="'get' or 'set'") <NEW_LINE> @click.option("--volume", "-V", type=float) <NEW_LINE> def alarm_volume(host, mode, volume=None): <NEW_LINE> <INDENT> import googledevices.cli.commands.alarm_volume as command <NEW_LINE> command.alarm_volume(host, LOOP, mode, volume) | Get or set alarm volume. | 625941b17047854f462a118d |
def stuff(): <NEW_LINE> <INDENT> return dict() | I am not doing anything here. Look elsewhere. | 625941b1de87d2750b85fb06 |
def list_pending(): <NEW_LINE> <INDENT> return pisi.db.installdb.InstallDB().list_pending() | Return a list of configuration pending packages -> list_of_strings | 625941b167a9b606de4a7c3d |
def __call__(self): <NEW_LINE> <INDENT> return self | python.org: Protocol factory callable returning a protocol implementation.
:since: v1.0.0
| 625941b1b545ff76a8913b9a |
def difference(self, E): <NEW_LINE> <INDENT> if not isinstance(E, DictSet): <NEW_LINE> <INDENT> E = DictSet(copy(E)) <NEW_LINE> <DEDENT> foo = deepcopy(self) <NEW_LINE> for k in set(foo.keys()) | set(E.keys()): <NEW_LINE> <INDENT> foo.setdefault(k, []) <NEW_LINE> foo[k].difference_update(E.get(k, [])) <NEW_LINE> if not foo[k]: <NEW_LINE> <INDENT> del foo[k] <NEW_LINE> <DEDENT> <DEDENT> return foo | Return the difference of the sets of self with the sets of E.
(i.e. all elements that are in the sets of this DictSet but
not the others.)
DS-E <==> DS.difference(E) | 625941b1462c4b4f79d1d44e |
def clean(self, value): <NEW_LINE> <INDENT> clean_data = [] <NEW_LINE> errors = ErrorList() <NEW_LINE> if not value or isinstance(value, (list, tuple)): <NEW_LINE> <INDENT> if not value or not [v for v in value if v not in validators.EMPTY_VALUES]: <NEW_LINE> <INDENT> if self.required: <NEW_LINE> <INDENT> raise ValidationError(self.error_messages['required']) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return self.compress([]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValidationError(self.error_messages['invalid']) <NEW_LINE> <DEDENT> for i, field in enumerate(self.fields): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> field_value = value[i] <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> field_value = None <NEW_LINE> <DEDENT> if i == 0 and self.required and field_value in validators.EMPTY_VALUES: <NEW_LINE> <INDENT> raise ValidationError(self.error_messages['required']) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> clean_data.append(field.clean(field_value)) <NEW_LINE> <DEDENT> except ValidationError as e: <NEW_LINE> <INDENT> errors.extend(e.messages) <NEW_LINE> <DEDENT> <DEDENT> if errors: <NEW_LINE> <INDENT> raise ValidationError(errors) <NEW_LINE> <DEDENT> out = self.compress(clean_data) <NEW_LINE> self.validate(out) <NEW_LINE> self.run_validators(out) <NEW_LINE> return out | Only the default language should be required. | 625941b166673b3332b91e0f |
def high_change3(dic, num): <NEW_LINE> <INDENT> life_list = [dic[i].loc[dic[i].index[-1], 't1'] for i in range(len(dic)-1)] <NEW_LINE> sorted_nums = sorted(enumerate(life_list), key=lambda x: x[1]) <NEW_LINE> df = pd.DataFrame(sorted_nums, columns=['idx', 'num']) <NEW_LINE> num_machine = len(dic)-1 <NEW_LINE> idx_start = (num_machine-num)//2 <NEW_LINE> range1 = df['idx'][idx_start: idx_start+num].values <NEW_LINE> return range1 | Strategy 3: shut down the num high-pressure pumps whose running time is in the middle
params:
dic: a dictionary recording device running information; mutable
num: the number of devices to shut down
return:
idx: the ids of the num devices that should be shut down | 625941b1e8904600ed9f1ca2 |
def getDiskInfo(): <NEW_LINE> <INDENT> disk_list = [] <NEW_LINE> all_total = 0 <NEW_LINE> used_total = 0 <NEW_LINE> for part in psutil.disk_partitions(all=False): <NEW_LINE> <INDENT> if 'cdrom' in part.opts or part.fstype == '': <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> mount_point = part.mountpoint <NEW_LINE> usage = psutil.disk_usage(mount_point) <NEW_LINE> device = part.device <NEW_LINE> d_total = usage.total <NEW_LINE> d_used = usage.used <NEW_LINE> d_free = usage.free <NEW_LINE> d_per = usage.percent <NEW_LINE> all_total += d_total <NEW_LINE> used_total += d_used <NEW_LINE> disk_vol = {'d_name':device,'d_total':d_total,'d_used':d_used,'d_free':d_free,'d_percent':d_per,'mount_point':mount_point} <NEW_LINE> disk_list.append(disk_vol) <NEW_LINE> <DEDENT> disk_list.append(format_bytes(all_total)) <NEW_LINE> disk_list.append(format_bytes(used_total)) <NEW_LINE> return disk_list | disk_partitions(): returns a list of information about the partitioned disks
disk_usage(path): returns the usage information of path as a tuple | 625941b126238365f5f0ebea |
def __del__(self): <NEW_LINE> <INDENT> self.wavFile.close() <NEW_LINE> self.asFile.close() | Close the file objects opened at creation. | 625941b1460517430c393f13 |
def _get_fields(feature): <NEW_LINE> <INDENT> fields = {} <NEW_LINE> for i in xrange(feature.GetFieldCount()): <NEW_LINE> <INDENT> field_def = feature.GetFieldDefnRef(i) <NEW_LINE> name = field_def.GetName().lower() <NEW_LINE> value = feature.GetField(i) <NEW_LINE> fields[name] = value <NEW_LINE> <DEDENT> return fields | Return a dict with all fields in the given feature.
feature - an OGR feature.
Returns an assembled python dict with a mapping of
fieldname -> fieldvalue | 625941b1097d151d1a222bdd |
def __init__(self, OF, c_name): <NEW_LINE> <INDENT> assert isinstance(OF, LowLevelType) <NEW_LINE> if isinstance(OF, Typedef): <NEW_LINE> <INDENT> OF = OF.OF <NEW_LINE> <DEDENT> self.OF = OF <NEW_LINE> self.c_name = c_name | @param OF: the equivalent rffi type
@param c_name: the name we want in C code | 625941b1d8ef3951e32432b7 |
def test_reversed_coordinates(): <NEW_LINE> <INDENT> N = 20 <NEW_LINE> s = xr.DataArray( np.random.rand(N) + 1j * np.random.rand(N), dims="x", coords={"x": np.arange(N // 2, -N // 2, -1) + 2}, ) <NEW_LINE> s2 = s.sortby("x") <NEW_LINE> xrt.assert_allclose( xrft.dft(s, dim="x", true_phase=True), xrft.dft(s2, dim="x", true_phase=True) ) | Reversed coordinates should not impact dft with true_phase = True | 625941b10a50d4780f666c0d |
def found_terminator(self): <NEW_LINE> <INDENT> self.log.debug('>> ' + self._inbuf) <NEW_LINE> self._inbuf.extend(b'\r\n') <NEW_LINE> try: <NEW_LINE> <INDENT> msg = IRCMessage(str(self._inbuf)) <NEW_LINE> <DEDENT> except IRCError as error: <NEW_LINE> <INDENT> self.log.error("Invalid IRC Message") <NEW_LINE> self.log.debug(repr(str(error))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._on_event(msg) <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> self._inbuf = bytearray() | asynchat connection handler. Internal. | 625941b1b7558d58953c4c9d |
def cellnormals(surface): <NEW_LINE> <INDENT> normals = vtk.vtkPolyDataNormals() <NEW_LINE> normals.SetInput(surface) <NEW_LINE> normals.ComputePointNormalsOff() <NEW_LINE> normals.ComputeCellNormalsOn() <NEW_LINE> normals.Update() <NEW_LINE> return normals.GetOutput() | Add cell normals. | 625941b176d4e153a657e8a9 |
def test_validate_activatable(): <NEW_LINE> <INDENT> base = ActivateEnvBase() <NEW_LINE> base.validate_activatable() <NEW_LINE> os.environ['AIIDA_PROJECT_ACTIVE'] = 'a_loaded_project' <NEW_LINE> with pytest.raises(Exception) as exception: <NEW_LINE> <INDENT> base.validate_activatable() <NEW_LINE> <DEDENT> assert "needs to be deactivated prior to" in str(exception.value) <NEW_LINE> os.environ['AIIDA_PROJECT_ACTIVE'] = '' <NEW_LINE> os.environ['AIIDA_PATH'] = '/a/test/aiida/path' <NEW_LINE> with pytest.raises(Exception) as exception: <NEW_LINE> <INDENT> base.validate_activatable() <NEW_LINE> <DEDENT> assert "AIIDA_PATH is already set" in str(exception.value) | Check that we only activate if no other project is active | 625941b18e7ae83300e4ad4d |
def _activation_summary(tensor): <NEW_LINE> <INDENT> tf.summary.histogram(tensor.op.name + '/activations', tensor) <NEW_LINE> tf.summary.scalar(tensor.op.name + '/sparsity', tf.nn.zero_fraction(tensor)) | Create summaries for a given tensor.
Args:
tensor: a tf.Tensor.
Returns:
None. | 625941b13317a56b869399e6 |
def ls(self, count = 200): <NEW_LINE> <INDENT> return self._manager.ls_notes(self['id'], count) | list all notes in this notebook | 625941b123849d37ff7b2e14 |
def savehistory(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self.searchquery.text().isEmpty(): <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> elif unicode(self.searchquery.text()) in self.history: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> if len(self.history) > 10: <NEW_LINE> <INDENT> self.history.pop(10) <NEW_LINE> <DEDENT> self.history.append(unicode(self.searchquery.text())) <NEW_LINE> basename = "history" <NEW_LINE> fileName = '%s/%s.onlinesearch' % (cwd, basename) <NEW_LINE> pickle.dump(self.history, open(fileName, "wb")) <NEW_LINE> return True <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return False | Runs every time "OK" is clicked and the browser is opened.
Saves the last 10 entered search strings.
:return: True or False | 625941b1adb09d7d5db6c516 |
def update(self): <NEW_LINE> <INDENT> self.json = c.get_document(self.uri.did).json() <NEW_LINE> self.e_list = c.element_list(self.uri.as_dict()).json() | Makes the client calls needed to update this instance from Onshape. | 625941b1f9cc0f698b140381 |
def _cast_value(self, value): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> value = int(value) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> if value.lower().strip() in ["true", "t", "1", "yes"]: <NEW_LINE> <INDENT> value = True <NEW_LINE> <DEDENT> elif value.lower().strip() in ["false", "f", "no", "0"]: <NEW_LINE> <INDENT> value = False <NEW_LINE> <DEDENT> <DEDENT> return value | Supported: int, bool, str | 625941b145492302aab5e040 |
def ReorderPageSet(self, results_file): <NEW_LINE> <INDENT> page_set_dict = {} <NEW_LINE> for page in self.user_stories: <NEW_LINE> <INDENT> page_set_dict[page.url] = page <NEW_LINE> <DEDENT> user_stories = [] <NEW_LINE> with open(results_file, 'rb') as csv_file: <NEW_LINE> <INDENT> csv_reader = csv.reader(csv_file) <NEW_LINE> csv_header = csv_reader.next() <NEW_LINE> if 'url' not in csv_header: <NEW_LINE> <INDENT> raise Exception('Unusable results_file.') <NEW_LINE> <DEDENT> url_index = csv_header.index('url') <NEW_LINE> for csv_row in csv_reader: <NEW_LINE> <INDENT> if csv_row[url_index] in page_set_dict: <NEW_LINE> <INDENT> self.AddPage(page_set_dict[csv_row[url_index]]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Exception('Unusable results_file.') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return user_stories | Reorders this page set based on the results of a past run. | 625941b163b5f9789fde6e61 |
def gen_logger(logger_name: str = None) -> logging.Logger: <NEW_LINE> <INDENT> logger = logging.getLogger(str(random.random())) <NEW_LINE> logger.setLevel(logging.DEBUG) <NEW_LINE> logger.name = logger_name <NEW_LINE> if logger_name is None: <NEW_LINE> <INDENT> formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s', datefmt='%Y/%m/%d %H:%M:%S') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> formatter = logging.Formatter('[%(asctime)s] [%(name)s~%(levelname)s] %(message)s', datefmt='%Y/%m/%d %H:%M:%S') <NEW_LINE> <DEDENT> stream_handler = logging.StreamHandler() <NEW_LINE> stream_handler.setFormatter(formatter) <NEW_LINE> stream_handler.setLevel(logging.WARNING) <NEW_LINE> logger.addHandler(stream_handler) <NEW_LINE> return logger | generate logger by Python standard library `logging`
TODO: add other handlers
Notes:
1. the third-party module `loguru` is recommended as a more powerful and pleasant alternative | 625941b1de87d2750b85fb07 |
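A usage sketch, assuming the `gen_logger` from the row above is in scope; the stream handler is set to WARNING even though the logger itself is at DEBUG, so INFO records are dropped:

```python
logger = gen_logger("worker")
logger.warning("shown: at or above the handler's WARNING level")
logger.info("suppressed: below the stream handler's level")
```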
def extract_quotes(html: str = HTML) -> dict: <NEW_LINE> <INDENT> quotes = re.findall(r"<p>(.*\"(.*)\" - (.*)\"?)<\/p>", html) <NEW_LINE> return {quote[2].strip(): quote[1].strip() for quote in quotes} | See instructions in the Bite description | 625941b13c8af77a43ae3522 |
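A quick demonstration of the pattern above on hypothetical HTML of the shape it expects (`<p>"quote" - author</p>`):

```python
import re

HTML = """<p>Some intro paragraph without a quote.</p>
<p>"If you can dream it, you can do it." - Walt Disney</p>
<p>"Simplicity is the ultimate sophistication." - Leonardo da Vinci</p>"""

quotes = re.findall(r"<p>(.*\"(.*)\" - (.*)\"?)<\/p>", HTML)
print({q[2].strip(): q[1].strip() for q in quotes})
# {'Walt Disney': 'If you can dream it, you can do it.',
#  'Leonardo da Vinci': 'Simplicity is the ultimate sophistication.'}
```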
def times_to_intervals(simulation_times): <NEW_LINE> <INDENT> intervals = [] <NEW_LINE> for i, time in enumerate(simulation_times): <NEW_LINE> <INDENT> if i < len(simulation_times) - 1: <NEW_LINE> <INDENT> interval = (time, simulation_times[i + 1]) <NEW_LINE> intervals.append(interval) <NEW_LINE> <DEDENT> <DEDENT> return intervals | >>> times_to_intervals([0, 4, 5, 8, 9, 12])
[(0, 4), (4, 5), (5, 8), (8, 9), (9, 12)] | 625941b130bbd722463cbb45 |
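The same pairwise pairing is usually written with `zip`; a sketch of the idiomatic equivalent:

```python
def times_to_intervals(simulation_times):
    # zip stops at the shorter argument, so the last time gets no interval.
    return list(zip(simulation_times, simulation_times[1:]))

assert times_to_intervals([0, 4, 5, 8, 9, 12]) == [(0, 4), (4, 5), (5, 8), (8, 9), (9, 12)]
```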
def begin_create_or_update( self, resource_group_name, virtual_hub_name, route_table_name, virtual_hub_route_table_v2_parameters, **kwargs ): <NEW_LINE> <INDENT> polling = kwargs.pop('polling', True) <NEW_LINE> cls = kwargs.pop('cls', None) <NEW_LINE> lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) <NEW_LINE> cont_token = kwargs.pop('continuation_token', None) <NEW_LINE> if cont_token is None: <NEW_LINE> <INDENT> raw_result = self._create_or_update_initial( resource_group_name=resource_group_name, virtual_hub_name=virtual_hub_name, route_table_name=route_table_name, virtual_hub_route_table_v2_parameters=virtual_hub_route_table_v2_parameters, cls=lambda x,y,z: x, **kwargs ) <NEW_LINE> <DEDENT> kwargs.pop('error_map', None) <NEW_LINE> kwargs.pop('content_type', None) <NEW_LINE> def get_long_running_output(pipeline_response): <NEW_LINE> <INDENT> deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response) <NEW_LINE> if cls: <NEW_LINE> <INDENT> return cls(pipeline_response, deserialized, {}) <NEW_LINE> <DEDENT> return deserialized <NEW_LINE> <DEDENT> path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'), 'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'), } <NEW_LINE> if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) <NEW_LINE> elif polling is False: polling_method = NoPolling() <NEW_LINE> else: polling_method = polling <NEW_LINE> if cont_token: <NEW_LINE> <INDENT> return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return LROPoller(self._client, raw_result, get_long_running_output, polling_method) | Creates a VirtualHubRouteTableV2 resource if it doesn't exist else updates the existing
VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:param virtual_hub_route_table_v2_parameters: Parameters supplied to create or update
VirtualHubRouteTableV2.
:type virtual_hub_route_table_v2_parameters: ~azure.mgmt.network.v2019_12_01.models.VirtualHubRouteTableV2
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHubRouteTableV2 or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_12_01.models.VirtualHubRouteTableV2]
:raises ~azure.core.exceptions.HttpResponseError: | 625941b192d797404e303f0d |
def execute(self, loadbalancer, listeners): <NEW_LINE> <INDENT> LOG.debug("Mark ACTIVE in DB for load balancer id: %s " "and listener ids: %s", loadbalancer.id, ', '.join([l.id for l in listeners])) <NEW_LINE> self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer.id, provisioning_status=constants.ACTIVE) <NEW_LINE> for listener in listeners: <NEW_LINE> <INDENT> self.listener_repo.update(db_apis.get_session(), listener.id, provisioning_status=constants.ACTIVE) | Mark the load balancer and listeners as active in DB.
:param loadbalancer: Load balancer object to be updated
:param listeners: Listener objects to be updated
:returns: None | 625941b1b545ff76a8913b9c |
def processor_affinity(self): <NEW_LINE> <INDENT> return _blocks_swig2.abs_ff_sptr_processor_affinity(self) | processor_affinity(abs_ff_sptr self) -> std::vector< int,std::allocator< int > > | 625941b121a7993f00bc7a69 |
def _auto_progress_api(api, interval=0.2): <NEW_LINE> <INDENT> with suppress(BaseFakeAPI.NoMoreStatesError): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> time.sleep(interval) <NEW_LINE> api.progress() | Progress a `BaseFakeAPI` instance every `interval` seconds until reaching
the final state. | 625941b1462c4b4f79d1d44f |
def set_request_uri(self, uri): <NEW_LINE> <INDENT> parsed = urllib.parse.urlparse(uri, allow_fragments=False) <NEW_LINE> if parsed.scheme != 'coap': <NEW_LINE> <INDENT> self.opt.proxy_uri = uri <NEW_LINE> return <NEW_LINE> <DEDENT> if parsed.username or parsed.password: <NEW_LINE> <INDENT> raise ValueError("User name and password not supported.") <NEW_LINE> <DEDENT> if parsed.path not in ('', '/'): <NEW_LINE> <INDENT> self.opt.uri_path = parsed.path.split('/')[1:] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.opt.uri_path = [] <NEW_LINE> <DEDENT> if parsed.query: <NEW_LINE> <INDENT> self.opt.uri_query = parsed.query.split('&') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.opt.uri_query = [] <NEW_LINE> <DEDENT> if parsed.port: <NEW_LINE> <INDENT> self.opt.uri_port = parsed.port <NEW_LINE> <DEDENT> self.opt.uri_host = parsed.hostname | Parse a given URI into the uri_* fields of the options.
The remote does not get set automatically; instead, the remote data is
stored in the uri_host and uri_port options. That is because name resolution
is coupled with network specifics the protocol will know better by the
time the message is sent. Whatever sends the message, be it the
protocol itself, a proxy wrapper or an alternative transport, will know
how to handle the information correctly. | 625941b1507cdc57c6306a4d |
def fn_float16(self, value): <NEW_LINE> <INDENT> if is_ndarray(value) or isinstance(value, (list, tuple)): <NEW_LINE> <INDENT> return self._to_ndarray(value).astype('float16') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return float(value) | Return the value cast to a 16-bit float (numpy array) or a Python float (single value).
:param value: The number.
:return: The number as a float. | 625941b13539df3088e2e0c6 |
def _get_page(self, url: str) -> str: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> response = requests.get(url, timeout=15, headers=self.HEADERS) <NEW_LINE> response.raise_for_status() <NEW_LINE> if 'Проверка, что Вы не робот' in response.text: <NEW_LINE> <INDENT> logger.warning(f'CAPCHA on url:{url}') <NEW_LINE> self.browser.open_new_tab(url) <NEW_LINE> time.sleep(90) <NEW_LINE> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> logger.info(f'Get page url:{url} success!') <NEW_LINE> return response.text <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except requests.RequestException as err: <NEW_LINE> <INDENT> logger.error(f'RequestException url:{url} err:{str(err)}') <NEW_LINE> return '' | Downloads the HTML page at the given url | 625941b1d164cc6175782ac9 |
def to_dict(self): <NEW_LINE> <INDENT> d = {'accepted': self.accepted} <NEW_LINE> if not self.accepted and bool(self.rejection_reason): <NEW_LINE> <INDENT> d['rejection_reason'] = self.rejection_reason <NEW_LINE> <DEDENT> d['verified_identity'] = self.identity.to_dict() <NEW_LINE> d['expiration_date'] = self.expiration_date <NEW_LINE> d['id_back_photo_valid'] = self.id_back_photo_valid <NEW_LINE> d['id_front_photo_valid'] = self.id_front_photo_valid <NEW_LINE> d['voter_reg_photo_valid'] = self.voter_reg_photo_valid <NEW_LINE> d['owner_photo_valid'] = self.owner_photo_valid <NEW_LINE> return d | convert object to dict | 625941b126238365f5f0ebec |
def text_from_html(html): <NEW_LINE> <INDENT> parser = _GetText() <NEW_LINE> parser.feed(html) <NEW_LINE> return "".join(parser.texts) | text = text_from_html(html)
Gets the text from the HTML representation
Parameters
----------
html : str or unicode
HTML Representation
Returns
-------
text : str or unicode
Just the textual content of `html` | 625941b182261d6c526ab21f |
def load_model(self, filepath): <NEW_LINE> <INDENT> f = open(filepath, "r") <NEW_LINE> lines = f.readlines() <NEW_LINE> self.vcnt, bicnt = map(int, lines[0].split()) <NEW_LINE> for i in xrange(1, self.vcnt+1): <NEW_LINE> <INDENT> word, cnt = lines[i].split() <NEW_LINE> self.unigramdict[word] = float(cnt) <NEW_LINE> <DEDENT> for i in xrange(self.vcnt+1, len(lines)): <NEW_LINE> <INDENT> word1, word2, cnt = lines[i].split() <NEW_LINE> self.ngramdict[word1+" "+word2] = float(cnt) | load the model defined in save_model | 625941b18e05c05ec3eea0f3 |
def check_turned_away_face_from_plane(face,plane_normal,sign = 1.0, local_sys = None, strict = False): <NEW_LINE> <INDENT> if local_sys is None: <NEW_LINE> <INDENT> if strict: <NEW_LINE> <INDENT> if sign*inner_product(face.getNormal(),plane_normal) > 0.0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if sign*inner_product(face.getNormal(),plane_normal) >= 0.0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise NotImplementedError("Error: More Precise checks are not implemented yet!") | Definition: A face is called turned away from a plane
iff <n_p,n_f(x)> >= 0 for all x in
the face, where n_p is the normal
of the plane, and n_f(x) the normal of
the face in a point x.
Analogously: A face is called strictly
turned away iff <n_p,n_f(x)> > 0 for
all x in the face. | 625941b1627d3e7fe0d68bca |
def scoring(machine, strand): <NEW_LINE> <INDENT> score = sum(machine.consume(char) for char in strand) <NEW_LINE> machine.reset() <NEW_LINE> return score | the total score produced by running `strand` through `machine` | 625941b1d486a94d0b98decc |
def _linear_learning_rate(num_linear_feature_columns): <NEW_LINE> <INDENT> default_learning_rate = 1. / math.sqrt(num_linear_feature_columns) <NEW_LINE> return min(_LINEAR_LEARNING_RATE, default_learning_rate) | Returns the default learning rate of the linear model.
The calculation is a historical artifact of this initial implementation, but
has proven a reasonable choice.
Args:
num_linear_feature_columns: The number of feature columns of the linear
model.
Returns:
A float. | 625941b1099cdd3c635f09e1 |
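A worked check of the clamping, assuming a cap of `_LINEAR_LEARNING_RATE = 0.005` (the constant's actual value is not shown in this row):

```python
import math

_LINEAR_LEARNING_RATE = 0.005  # assumed cap, not shown above

def _linear_learning_rate(num_linear_feature_columns):
    default_learning_rate = 1. / math.sqrt(num_linear_feature_columns)
    return min(_LINEAR_LEARNING_RATE, default_learning_rate)

print(_linear_learning_rate(100))      # 1/sqrt(100) = 0.1 -> clamped to 0.005
print(_linear_learning_rate(100_000))  # ~0.00316          -> below the cap, used as-is
```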
def disable_sel_expired(modeladmin, request, queryset): <NEW_LINE> <INDENT> querylen = len(queryset) <NEW_LINE> discnt = disable_expired(queryset) <NEW_LINE> pstr = plural(querylen, 'paste') <NEW_LINE> msg = 'Disabled {} of {} selected {}.'.format(discnt, querylen, pstr) <NEW_LINE> modeladmin.message_user(request, msg, level=messages.SUCCESS) | Disable selected expired pastes. | 625941b16aa9bd52df036b1e |
def get_scale(x): <NEW_LINE> <INDENT> scales = [20, 50, 100, 200, 400, 600, 800, 1000] <NEW_LINE> for scale in scales: <NEW_LINE> <INDENT> if x <= scale: <NEW_LINE> <INDENT> return scale <NEW_LINE> <DEDENT> <DEDENT> return x | Finds the lowest scale where x <= scale. | 625941b123849d37ff7b2e16 |
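Because `scales` is sorted, the linear scan above can also be expressed with `bisect`; a sketch of that alternative:

```python
import bisect

SCALES = (20, 50, 100, 200, 400, 600, 800, 1000)

def get_scale(x):
    # bisect_left returns the index of the first scale >= x.
    i = bisect.bisect_left(SCALES, x)
    return SCALES[i] if i < len(SCALES) else x

assert get_scale(21) == 50 and get_scale(20) == 20 and get_scale(1500) == 1500
```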
def control(z, z_target, ctrl, t=None, w=None, f=None): <NEW_LINE> <INDENT> z_t = z_target(t) <NEW_LINE> dz_t = (z_target(t+.05)-z_target(t-.05))/.1 <NEW_LINE> d2z_t = (z_target(t+.05)-2.*z_target(t)+z_target(t-.05))/.05**2 <NEW_LINE> if ctrl['mode'] is 'sliding': <NEW_LINE> <INDENT> x2 = w <NEW_LINE> f2 = f._f(z, ctrl['waterp'], ctrl['Lv'])/f.m <NEW_LINE> f3 = ( f.volume(z=z+.5, waterp=ctrl['waterp']) - f.volume(z=z-.5, waterp=ctrl['waterp']) )/1. *x2 <NEW_LINE> df1, df2, df3 = f._df(f.z, ctrl['waterp'], ctrl['Lv']) <NEW_LINE> df1, df2, df3 = df1/f.m, df2/f.m, df3/f.m <NEW_LINE> d3y = ctrl['d3y_ctrl']*control_sliding(z, w, f2, z_t, dz_t, d2z_t, ctrl['tau']) <NEW_LINE> u = df1*x2 + df2*f2 + df3*f3 - d3y <NEW_LINE> u = -u/df3 <NEW_LINE> <DEDENT> elif ctrl['mode'] is 'pid': <NEW_LINE> <INDENT> error = z_t - z <NEW_LINE> ctrl['integral'] += error*ctrl['dt_ctrl'] <NEW_LINE> ctrl['derivative'] = (error - ctrl['error'])/ctrl['dt_ctrl'] <NEW_LINE> ctrl['error'] = error <NEW_LINE> u = ctrl['Kp']*ctrl['error'] + ctrl['Ki']*ctrl['integral'] + ctrl['Kd']*ctrl['derivative'] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print('!! mode '+ctrl['mode']+' is not implemented, exiting ...') <NEW_LINE> sys.exit() <NEW_LINE> <DEDENT> return u | Implements the control of the float position
| 625941b1b57a9660fec335fb |
def frustumShellVol(rb_0, rt_0, t, h, diamFlag=False): <NEW_LINE> <INDENT> if diamFlag: <NEW_LINE> <INDENT> rb, rt = 0.5 * rb_0, 0.5 * rt_0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> rb, rt = rb_0, rt_0 <NEW_LINE> <DEDENT> rb_o = rb <NEW_LINE> rb_i = rb - t <NEW_LINE> rt_o = rt <NEW_LINE> rt_i = rt - t <NEW_LINE> return frustumVol(rb_o, rt_o, h) - frustumVol(rb_i, rt_i, h) | This function returns a frustum shell's volume (for computing mass with density) with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTS:
-------
vol : float (scalar/vector), frustum shell volume | 625941b17c178a314d6ef1d5 |
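The helper `frustumVol` is not shown in this row; a minimal sketch assuming the standard conical-frustum volume V = πh(r_b² + r_b·r_t + r_t²)/3, with a hollow-cylinder sanity check:

```python
import numpy as np

def frustumVol(rb, rt, h):
    # Solid frustum: V = pi*h/3 * (rb^2 + rb*rt + rt^2)
    return np.pi * h / 3.0 * (rb**2 + rb * rt + rt**2)

def frustumShellVol(rb, rt, t, h):
    # Shell volume = outer solid frustum minus inner solid frustum.
    return frustumVol(rb, rt, h) - frustumVol(rb - t, rt - t, h)

# With rb == rt == r the shell degenerates to a hollow cylinder:
r, t, h = 2.0, 0.1, 5.0
assert np.isclose(frustumShellVol(r, r, t, h), np.pi * h * (r**2 - (r - t)**2))
```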
def parse_data(data_file): <NEW_LINE> <INDENT> csvfile = open(data_file, 'r') <NEW_LINE> csvreader = csv.reader(csvfile) <NEW_LINE> key_url_list = {line[0]: line[1] for line in csvreader} <NEW_LINE> return key_url_list | read index files | 625941b10c0af96317bb7f6d |
def close(self): <NEW_LINE> <INDENT> if not self.bng: <NEW_LINE> <INDENT> raise BNGError('Scenario needs to be loaded into a BeamNGpy ' 'instance to be stopped.') <NEW_LINE> <DEDENT> for vehicle in self.vehicles: <NEW_LINE> <INDENT> vehicle.close() <NEW_LINE> <DEDENT> self.bng = None <NEW_LINE> self.logger.debug('Removed beamngpy instance from scenario class.') | Closes open connections and allocations of the scenario. | 625941b173bcbd0ca4b2bdf9 |
def _validate_scalarization_parameter_shape( multi_objectives: tf.Tensor, params: Dict[str, Union[Sequence[ScalarFloat], tf.Tensor]]): <NEW_LINE> <INDENT> for param_name, param_value in params.items(): <NEW_LINE> <INDENT> param_shape = tf.convert_to_tensor(param_value).shape <NEW_LINE> if param_shape.rank != 1 and not multi_objectives.shape.is_compatible_with( param_shape): <NEW_LINE> <INDENT> raise ValueError( 'The shape of multi_objectives: {} does not match the shape of ' 'scalarization parameter: {}, which is {}'.format( multi_objectives.shape, param_name, param_shape)) | A private helper that validates the shapes of scalarization parameters.
Every scalarization parameter in the input dictionary is either a 1-D tensor
or `Sequence`, or a tensor whose shape matches the shape of the input
`multi_objectives` tensor. This is invoked by the `Scalarizer.call` method.
Args:
multi_objectives: A `tf.Tensor` representing the multiple objectives to be
scalarized.
params: A dictionary from parameter names to parameter values (`Sequence` or
`tf.Tensor`).
Raises:
tf.errors.InvalidArgumentError: if any scalarization parameter is not a 1-D
tensor or `Sequence`, and has shape that does not match the shape of
`multi_objectives`. | 625941b145492302aab5e041 |
def test_normal_gpu(self): <NEW_LINE> <INDENT> self.yaml_config_name = sys._getframe().f_code.co_name + '.yaml' <NEW_LINE> self.yaml_content["runner"][0]["device"] = 'gpu' <NEW_LINE> self.run_yaml() <NEW_LINE> built_in.equals(self.pro.returncode, 0, self.err_msg) <NEW_LINE> built_in.not_contains(self.err, 'Traceback', self.err_msg) <NEW_LINE> built_in.regex_match_len(self.out, self.epoch_re, 2, self.err_msg) | test normal yaml construct by RankDNN base in gpu. | 625941b14f6381625f1147c3 |
def calc_slope_distance_ver_lines(mat, ratio=0.3, search_range=30.0, radius=9, sensitive=0.1, bgr="bright", denoise=True, norm=True, subpixel=True): <NEW_LINE> <INDENT> if denoise is True: <NEW_LINE> <INDENT> mat = ndi.gaussian_filter(mat, 3) <NEW_LINE> <DEDENT> mat_roi = prep._select_roi(mat, ratio, square=True) <NEW_LINE> if bgr == "bright": <NEW_LINE> <INDENT> mat_roi = np.max(mat_roi) - mat_roi <NEW_LINE> <DEDENT> angle_coarse = np.arange(-search_range, search_range + 1.0) <NEW_LINE> mask = _make_circle_mask(mat_roi.shape[0], 0.92) <NEW_LINE> sinogram1 = radon(mat_roi * mask, theta=angle_coarse, circle=True) <NEW_LINE> list_max1 = np.amax(sinogram1, axis=0) <NEW_LINE> pos_max1 = np.argmax(list_max1) <NEW_LINE> best_angle1 = angle_coarse[pos_max1] <NEW_LINE> angle_fine = np.arange(best_angle1 - 1.0, best_angle1 + 1.05, 0.05) <NEW_LINE> sinogram2 = radon(mat_roi * mask, theta=angle_fine, circle=True) <NEW_LINE> list_max2 = np.amax(sinogram2, axis=0) <NEW_LINE> pos_max2 = np.argmax(list_max2) <NEW_LINE> best_angle2 = angle_fine[pos_max2] <NEW_LINE> slope = np.tan(best_angle2 * np.pi / 180.0) <NEW_LINE> list_ext_point = get_local_extrema_points(sinogram2[:, pos_max2], option="max", radius=radius, denoise=denoise, norm=norm, subpixel=subpixel, sensitive=sensitive) <NEW_LINE> if len(list_ext_point) > 3: <NEW_LINE> <INDENT> distance = np.median(np.abs(np.diff(list_ext_point))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> distance = np.mean(np.abs(np.diff(list_ext_point))) <NEW_LINE> <DEDENT> return slope, distance | Calculate the representative distance between vertical lines and the
representative slope of these lines using the ROI around the middle of a
line-pattern image.
Parameters
----------
mat : array_like
2D array.
ratio : float
Used to select the ROI around the middle of an image.
search_range : float
Search range in Degree to determine the slope of lines.
radius : int
Search radius. Used to locate lines.
sensitive : float
To detect lines against random noise. Smaller is more sensitive.
bgr : {"bright", "dark"}
Specify the brightness of the background against the lines.
denoise : bool, optional
Applying a smoothing filter if True.
subpixel : bool, optional
Locate points with subpixel accuracy.
Returns
-------
slope : float
Slope of vertical lines in Radian.
distance : float
Distance between vertical lines. | 625941b12c8b7c6e89b35548 |
def setShowPercentTextFlag(self, flag): <NEW_LINE> <INDENT> self.showPercentText = flag | Sets the flag that indicates that the text for the
percent should be displayed. | 625941b1293b9510aa2c3017 |
def _get_client(self): <NEW_LINE> <INDENT> url = "http://10.182.155.37/UserAuthentication.asmx?WSDL" <NEW_LINE> client = Client(url) <NEW_LINE> return client | contacts the webservice and gets a client with suds
:return: client object | 625941b130bbd722463cbb47 |
def weights_from_rewards(self, rewards): <NEW_LINE> <INDENT> return reps_weights_from_rewards(rewards, self.rel_entropy_bound, self.min_temperature) | Wrapper function for reps_weights_from_rewards | 625941b18a43f66fc4b53def |
def _ws_data(self, ws): <NEW_LINE> <INDENT> data = {'obj': ws, 'id': ws.id, 'url': ws.absolute_url(), 'template_title': ws.getWorksheetTemplateTitle(), 'remarks': ws.getRemarks(), 'date_printed': self.ulocalized_time(DateTime(), long_format=1), 'date_created': self.ulocalized_time(ws.created(), long_format=1)} <NEW_LINE> data['ars'] = self._analyses_data(ws) <NEW_LINE> data['createdby'] = self._createdby_data(ws) <NEW_LINE> data['analyst'] = self._analyst_data(ws) <NEW_LINE> data['printedby'] = self._printedby_data(ws) <NEW_LINE> ans = [] <NEW_LINE> for ar in data['ars']: <NEW_LINE> <INDENT> ans.extend([an['title'] for an in ar['analyses']]) <NEW_LINE> <DEDENT> data['analyses_titles'] = list(set(ans)) <NEW_LINE> portal = self.context.portal_url.getPortalObject() <NEW_LINE> data['portal'] = {'obj': portal, 'url': portal.absolute_url()} <NEW_LINE> data['laboratory'] = self._lab_data() <NEW_LINE> return data | Creates an ws dict, accessible from the view and from each
specific template.
Keys: obj, id, url, template_title, remarks, date_printed,
ars, createdby, analyst, printedby, analyses_titles,
portal, laboratory | 625941b10a366e3fb873e593 |
def build(self, baserepo=None, ref=None, baseconfig=None, message_id=None, subject=None, emails=set(), patch_url_list=[], makeopts=None): <NEW_LINE> <INDENT> params = dict() <NEW_LINE> if baserepo is not None: <NEW_LINE> <INDENT> params["baserepo"] = baserepo <NEW_LINE> <DEDENT> if ref is not None: <NEW_LINE> <INDENT> params["ref"] = ref <NEW_LINE> <DEDENT> if baseconfig is not None: <NEW_LINE> <INDENT> params["baseconfig"] = baseconfig <NEW_LINE> <DEDENT> if message_id: <NEW_LINE> <INDENT> params["message_id"] = message_id <NEW_LINE> <DEDENT> if subject: <NEW_LINE> <INDENT> params["subject"] = subject <NEW_LINE> <DEDENT> if emails: <NEW_LINE> <INDENT> params["emails"] = ",".join(emails) <NEW_LINE> <DEDENT> if patch_url_list: <NEW_LINE> <INDENT> params["patchwork"] = " ".join(patch_url_list) <NEW_LINE> <DEDENT> if makeopts is not None: <NEW_LINE> <INDENT> params["makeopts"] = makeopts <NEW_LINE> <DEDENT> logging.debug(params) <NEW_LINE> job = self.__get_job() <NEW_LINE> expected_id = self.__get_next_build_number(job) <NEW_LINE> self.__build_job(params) <NEW_LINE> build = self.find_build(params, expected_id) <NEW_LINE> logging.info("submitted build: %s", build) <NEW_LINE> return build.get_number() | Submit a build of a patch series.
Args:
baserepo: Baseline Git repo URL.
ref: Baseline Git reference to test.
baseconfig: Kernel configuration URL.
message_id: Value of the "Message-Id" header of the e-mail
message representing the series, or None if
unknown.
subject: Subject of the message representing the series,
or None if unknown.
emails: Set of e-mail addresses involved with the series
to send notifications to.
patch_url_list: List of URLs pointing to patches to apply, in the
order they should be applied in.
makeopts: String of extra arguments to pass to the build's
make invocation.
Returns:
Submitted build number. | 625941b126238365f5f0ebee |
def addLookAt(self, geoPoint): <NEW_LINE> <INDENT> self.checkAnd() <NEW_LINE> self._requestString += ("lookat=" + str(geoPoint)) <NEW_LINE> return self | Adds an image-search parameter for images pointed toward the specified point
Makes no sense without also using addCloseTo(geoPoint) or addBbox(geoMin, geoMax)
:param: geoPoint - an object of type model.GeoPoint specifying the point toward which the images in the response must be directed | 625941b115baa723493c3cf6 |
def create(self): <NEW_LINE> <INDENT> genes = [] <NEW_LINE> u_bounds = [] <NEW_LINE> l_bounds = [] <NEW_LINE> for i in range(self.n_ins, self.n_ins + self.n_fun_nodes): <NEW_LINE> <INDENT> upper_bound = self.n_funs - 1 <NEW_LINE> lower_bound = 0 <NEW_LINE> function_gene = randint(0, upper_bound) <NEW_LINE> genes.append(function_gene) <NEW_LINE> u_bounds.append(upper_bound) <NEW_LINE> l_bounds.append(lower_bound) <NEW_LINE> current_column = (i - self.n_ins) // self.n_rows <NEW_LINE> upper_bound = self.n_ins + current_column * self.n_rows - 1 <NEW_LINE> lower_bound = clamp_bottom(upper_bound - self.max_back + 1, 0) <NEW_LINE> for _ in range(self.arity): <NEW_LINE> <INDENT> u_bounds.append(upper_bound) <NEW_LINE> l_bounds.append(lower_bound) <NEW_LINE> genes.append(randint(lower_bound, upper_bound)) <NEW_LINE> <DEDENT> <DEDENT> output_gene_upper_bound = self.n_ins + self.n_fun_nodes - 1 <NEW_LINE> output_gene_lower_bound = clamp_bottom( output_gene_upper_bound - self.max_back + 1, 0 ) <NEW_LINE> for i in range(self.n_outs): <NEW_LINE> <INDENT> u_bounds.append(output_gene_upper_bound) <NEW_LINE> l_bounds.append(output_gene_lower_bound) <NEW_LINE> genes.append(randint(output_gene_lower_bound, output_gene_upper_bound)) <NEW_LINE> <DEDENT> return genes, (l_bounds, u_bounds) | Create an individual primitives
Returns
-------
tuple
tuple holding list of genes and list of bounds | 625941b1cdde0d52a9e52db3 |
def parseRules(self, rule_tuple): <NEW_LINE> <INDENT> valid_rule = re.compile("^[a-z]+\*?\d[a-z]*[>\.]?$") <NEW_LINE> self.rule_dictionary = {} <NEW_LINE> for rule in rule_tuple: <NEW_LINE> <INDENT> if not valid_rule.match(rule): <NEW_LINE> <INDENT> raise ValueError("The rule {0} is invalid".format(rule)) <NEW_LINE> <DEDENT> first_letter = rule[0:1] <NEW_LINE> if first_letter in self.rule_dictionary: <NEW_LINE> <INDENT> self.rule_dictionary[first_letter].append(rule) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.rule_dictionary[first_letter] = [rule] | Validate the set of rules used in this stemmer.
| 625941b191af0d3eaac9b796 |
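To see what `valid_rule` accepts — lowercase letters, an optional `*`, exactly one digit, optional trailing letters, and an optional final `>` or `.` — a small check with hypothetical rule strings:

```python
import re

valid_rule = re.compile(r"^[a-z]+\*?\d[a-z]*[>\.]?$")

for rule in ["ing0.", "ab*1cd>", "abc", "Ing0."]:
    print(rule, bool(valid_rule.match(rule)))
# ing0.   True
# ab*1cd> True
# abc     False (missing the required digit)
# Ing0.   False (uppercase letter)
```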
def tertiary_semclass1(self): <NEW_LINE> <INDENT> vals = self.semclass1_values() <NEW_LINE> if len(vals) >= 3: <NEW_LINE> <INDENT> return vals[2] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None | ConnHeadSemClass1 has fields separated by dots. This function
returns the third (as a str) if there is one, else None.
Values (except None):
Chosen alternative, Conjunctive, Contra-expectation,
Disjunctive, Equivalence, Expectation, Factual past, Factual
present, General, Generalization, Hypothetical, Implicit
assertion, Justification, Juxtaposition, NONE, Opposition,
Precedence, Reason, Relevance, Result,Specification,
Succession, Unreal past, Unreal present | 625941b14a966d76dd550d8f |
def __init__(self, addr=None, dev_cat=None, sub_cat=None, firmware=None, is_ack=None): <NEW_LINE> <INDENT> super().__init__() <NEW_LINE> self.addr = addr <NEW_LINE> self.dev_cat = dev_cat <NEW_LINE> self.sub_cat = sub_cat <NEW_LINE> self.firmware = firmware <NEW_LINE> self.is_ack = is_ack | Constructor
Args:
is_ack (bool): True for ACK, False for NAK. None for output
commands to the modem. | 625941b1e5267d203edcda27 |
def mouseDoubleClickEvent(self, event): <NEW_LINE> <INDENT> super(XNodeScene, self).mouseDoubleClickEvent(event) <NEW_LINE> if event.button() == Qt.LeftButton: <NEW_LINE> <INDENT> item = self.itemAt(event.scenePos()) <NEW_LINE> if not item: <NEW_LINE> <INDENT> self.clearSelection() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> blocked = self.signalsBlocked() <NEW_LINE> self.blockSignals(True) <NEW_LINE> self.clearSelection() <NEW_LINE> item.setSelected(True) <NEW_LINE> self.blockSignals(blocked) <NEW_LINE> if isinstance(item, XNode) and not blocked: <NEW_LINE> <INDENT> self.nodeDoubleClicked.emit(item) <NEW_LINE> <DEDENT> elif isinstance(item, XNodeConnection) and not blocked: <NEW_LINE> <INDENT> self.connectionDoubleClicked.emit(item) <NEW_LINE> <DEDENT> if not blocked: <NEW_LINE> <INDENT> self.itemDoubleClicked.emit(item) | Emits the node double clicked event when a node is double clicked.
:param event | <QMouseDoubleClickEvent> | 625941b16aa9bd52df036b20 |
def __init__(self, n: int): <NEW_LINE> <INDENT> super(DiscreteConjunctionsCounter, self).__init__(n) <NEW_LINE> self.n = n | Initializes the object that counts all possible discrete conjunctions over the finite chain Ln.
Args:
n: An integer, representing the dimension of the finite chain. | 625941b10383005118ecf363 |
def initialize_agents(config): <NEW_LINE> <INDENT> buyer_prices = [np.arange(config.lowprice, get_random_high_buyer(config.lowprice, config.highprice), .50) for _ in range(config.nbuyers)] <NEW_LINE> seller_prices = [np.arange(get_random_low_seller(config.lowprice, config.highprice), config.highprice + .5, .50) for _ in range(config.nsellers)] <NEW_LINE> buyers = [Buyer(prices, config) for prices in buyer_prices] <NEW_LINE> sellers = [Seller(prices, config) for prices in seller_prices] <NEW_LINE> return buyers, sellers | Initializes agents using the config files. Does 2 things:
Makes a list of buyers, all buyers have a random price range from (lowest - random number)
Makes a list of sellers, all sellers have a random price range from (random number - highest) | 625941b185dfad0860c3abdc |
def kidFile(): <NEW_LINE> <INDENT> from os import path as osp <NEW_LINE> from lib.KidDir import KidFile <NEW_LINE> PATH = osp.dirname(osp.abspath(__file__)) <NEW_LINE> FIRST = 'kidFile' <NEW_LINE> SECONDE = 'codeVs.py' <NEW_LINE> THIRD = '.docker' <NEW_LINE> FOUR = '.docker.png' <NEW_LINE> path1 = osp.join(PATH,FIRST) <NEW_LINE> path2 = osp.join(PATH,SECONDE) <NEW_LINE> path3 = osp.join(PATH,THIRD) <NEW_LINE> path4 = osp.join(PATH,FOUR) <NEW_LINE> kidFile1 = KidFile(path1) <NEW_LINE> kidFile2 = KidFile(path2) <NEW_LINE> kidFile3 = KidFile(path3) <NEW_LINE> kidFile4 = KidFile(path4) <NEW_LINE> assert kidFile1.suffix =='', '1 suffix should be space' <NEW_LINE> assert kidFile2.suffix =='py', '2 suffix should be py' <NEW_LINE> assert kidFile3.suffix =='', '3 suffix should be space.' <NEW_LINE> assert kidFile4.suffix =='png', '4 suffix should be png' <NEW_LINE> assert kidFile1.filename =='kidFile', '1 filename should be kidFile' <NEW_LINE> assert kidFile2.filename =='codeVs.py', '2 filename should be codeVs.py' <NEW_LINE> assert kidFile3.filename =='.docker', '3 filename should be .docker' <NEW_LINE> assert kidFile4.filename =='.docker.png', '4 filename should be .docker.png' <NEW_LINE> assert kidFile1.name =='kidFile', '1 name should be kidFile' <NEW_LINE> assert kidFile2.name =='codeVs', '2 name should be kidFile2' <NEW_LINE> assert kidFile3.name =='.docker', '3 name should be kidFile3' <NEW_LINE> assert kidFile4.name =='.docker', '4 name should be .docker' <NEW_LINE> assert kidFile1.path ==PATH, '1 path should be {}'.format(PATH) <NEW_LINE> assert kidFile2.path ==PATH, '2 path should be {}'.format(PATH) <NEW_LINE> assert kidFile3.path ==PATH, '3 path should be {}'.format(PATH) <NEW_LINE> assert kidFile4.path ==PATH, '4 path should be {}'.format(PATH) | KidFile | 625941b1711fe17d825420fb |
@registry.register_hparams <NEW_LINE> def basic_conv_l1(): <NEW_LINE> <INDENT> hparams = basic_conv() <NEW_LINE> hparams.target_modality = "video:l1" <NEW_LINE> hparams.video_modality_loss_cutoff = 3.0 <NEW_LINE> return hparams | Basic conv model with L1 modality. | 625941b1b5575c28eb68dd7b |
def webmks(request): <NEW_LINE> <INDENT> vm_name = request.GET.get("url") <NEW_LINE> weburl = get_webmks_url(vm_name) <NEW_LINE> if weburl == 0: <NEW_LINE> <INDENT> return HttpResponse("Server is not powerOn!") <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> url = { 'weburl':weburl } <NEW_LINE> return render(request, 'webmks/webconsole.html', url) | VMware vSphere virtual machine web console | 625941b1f9cc0f698b140385 |
def find_missing_images_in_es_via_swift(es, days): <NEW_LINE> <INDENT> imageList = get_swift_images(es, days) <NEW_LINE> docs = generate_image_docs(imageList) <NEW_LINE> docsNotFound = {} <NEW_LINE> if len(docs) > 0: <NEW_LINE> <INDENT> response = multi_get_from_es_index(index=os.environ['ES_REGISTRY_INDEX'], doc_type=os.environ['ES_REGISTRY_IMAGE_TYPE'], body={"ids" : docs.keys()}, _source=False, fields=[]) <NEW_LINE> for item in response['docs']: <NEW_LINE> <INDENT> if item['found'] == False: <NEW_LINE> <INDENT> docsNotFound[item['_id']] = docs.get(item['_id']) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> log(str(len(docsNotFound)) + ' missing images identified in ES....') <NEW_LINE> return docsNotFound | Get the registry Image names that are present in Swift but absent in the registry ES index
@return List of image ids | 625941b13c8af77a43ae3526 |
@login_required <NEW_LINE> def admin_moderate_del(request, msg_id): <NEW_LINE> <INDENT> u <NEW_LINE> msg = get_object_or_404(Message, id=msg_id) <NEW_LINE> if request.method == 'POST': <NEW_LINE> <INDENT> form = CauseForm(request.POST) <NEW_LINE> if form.is_valid(): <NEW_LINE> <INDENT> if ( msg.group and msg.group.is_staff(request.user) ) or request.user.is_superuser: <NEW_LINE> <INDENT> msg.delete() <NEW_LINE> messages.add_message( request, messages.SUCCESS, 'Сообщение удалено.' ) <NEW_LINE> domain = Site.objects.get_current().domain <NEW_LINE> email_data = msg.group.get_email_data() <NEW_LINE> text = form.cleaned_data['text'] <NEW_LINE> if not text: <NEW_LINE> <INDENT> text = u'Не указана.' <NEW_LINE> <DEDENT> threading_simple_send_mail( 'mail/conference_failure.html', u'Отказано в публикации сообщения', [msg.user.email], { 'message': u'Причина отказа: %s' % text, 'domain':domain, 'sign':email_data['sign'] }, from_email = email_data['from_email'], connection = email_data['backend'], sender_name = msg.group.get_title(), ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise Http404() <NEW_LINE> <DEDENT> return HttpResponseReload(request) <NEW_LINE> <DEDENT> <DEDENT> raise Http404() | Moderation. Delete a message. | 625941b156ac1b37e6263f62 |
def GetPC(self): <NEW_LINE> <INDENT> return _lldb.SBFrame_GetPC(self) | GetPC(SBFrame self) -> lldb::addr_t | 625941b1507cdc57c6306a50 |
def test_post_ois_data(self, testapp): <NEW_LINE> <INDENT> department = Department.create(name="Good Police Department", short_name="GPD", load_defaults=False) <NEW_LINE> extractor, envs = Extractor.from_department_and_password(department=department, password="password") <NEW_LINE> testapp.authorization = ('Basic', (extractor.username, 'password')) <NEW_LINE> test_client = JSONTestClient() <NEW_LINE> ois_count = 1 <NEW_LINE> ois_data = test_client.get_prebaked_ois(last=ois_count) <NEW_LINE> response = testapp.post_json("/data/OIS", params={'month': 0, 'year': 0, 'data': ois_data}) <NEW_LINE> assert response.status_code == 200 <NEW_LINE> assert response.json_body['updated'] == 0 <NEW_LINE> assert response.json_body['added'] == ois_count <NEW_LINE> cleaner = Cleaners() <NEW_LINE> sent_ois = ois_data[0] <NEW_LINE> check_ois = OfficerInvolvedShooting.query.filter_by(opaque_id=sent_ois['opaqueId']).first() <NEW_LINE> assert check_ois.occured_date.strftime('%Y-%m-%d %-H:%-M:%S') == sent_ois['occuredDate'] <NEW_LINE> assert check_ois.division == cleaner.capitalize(sent_ois['division']) <NEW_LINE> assert check_ois.precinct == cleaner.capitalize(sent_ois['precinct']) <NEW_LINE> assert check_ois.shift == cleaner.capitalize(sent_ois['shift']) <NEW_LINE> assert check_ois.beat == cleaner.capitalize(sent_ois['beat']) <NEW_LINE> assert check_ois.disposition == sent_ois['disposition'] <NEW_LINE> assert check_ois.resident_race == cleaner.race(sent_ois['residentRace']) <NEW_LINE> assert check_ois.resident_sex == cleaner.sex(sent_ois['residentSex']) <NEW_LINE> assert check_ois.resident_age == cleaner.number_to_string(sent_ois['residentAge']) <NEW_LINE> assert check_ois.resident_weapon_used == cleaner.resident_weapon_used(sent_ois['residentWeaponUsed']) <NEW_LINE> assert check_ois.resident_condition == sent_ois['residentCondition'] <NEW_LINE> assert check_ois.officer_identifier == sent_ois['officerIdentifier'] <NEW_LINE> assert check_ois.officer_weapon_used == sent_ois['officerForceType'] <NEW_LINE> assert check_ois.officer_race == cleaner.race(sent_ois['officerRace']) <NEW_LINE> assert check_ois.officer_sex == cleaner.sex(sent_ois['officerSex']) <NEW_LINE> assert check_ois.officer_age == cleaner.number_to_string(sent_ois['officerAge']) <NEW_LINE> assert check_ois.officer_years_of_service == cleaner.string_to_integer(sent_ois['officerYearsOfService']) <NEW_LINE> assert check_ois.officer_condition == sent_ois['officerCondition'] | New OIS data from the extractor is processed as expected.
| 625941b166673b3332b91e15 |
def pushButtonSaveToFileClicked(self): <NEW_LINE> <INDENT> fileDir = self.settings.value('fileDir') or '' <NEW_LINE> file = QtWidgets.QFileDialog.getSaveFileName(self, "Save new Data File", fileDir, 'TreeTime Files (*.trt)')[0] <NEW_LINE> if file != '': <NEW_LINE> <INDENT> self.labelCurrentFile.setText(file) <NEW_LINE> self.writeToFile() <NEW_LINE> self.setWindowTitle("TreeTime - " + file) <NEW_LINE> self.settings.setValue('fileDir', os.path.dirname(file)) <NEW_LINE> self.settings.setValue('lastFile', file) | Callback for the save-file button. Saves the current data to a new file and keeps that file connected. | 625941b11f5feb6acb0c48dc |
def get_color(self, key): <NEW_LINE> <INDENT> return Gdk.Color(*self.get_value(key).unpack()) | Returns a Gdk.Color using the values from the specified setting, which
should be a 3-tuple of RGB values | 625941b1d164cc6175782acd |
def rvs(self, dim, size=1, random_state=None): <NEW_LINE> <INDENT> random_state = self._get_random_state(random_state) <NEW_LINE> size = int(size) <NEW_LINE> if size > 1: <NEW_LINE> <INDENT> return np.array([self.rvs(dim, size=1, random_state=random_state) for i in range(size)]) <NEW_LINE> <DEDENT> dim = self._process_parameters(dim) <NEW_LINE> H = np.eye(dim) <NEW_LINE> for n in range(dim): <NEW_LINE> <INDENT> x = random_state.normal(size=(dim-n,)) <NEW_LINE> D = np.sign(x[0]) if x[0] != 0 else 1 <NEW_LINE> x[0] += D*np.sqrt((x*x).sum()) <NEW_LINE> Hx = -D*(np.eye(dim-n) - 2.*np.outer(x, x)/(x*x).sum()) <NEW_LINE> mat = np.eye(dim) <NEW_LINE> mat[n:, n:] = Hx <NEW_LINE> H = np.dot(H, mat) <NEW_LINE> <DEDENT> return H | Draw random samples from O(N).
Parameters
----------
dim : integer
Dimension of rotation space (N).
size : integer, optional
Number of samples to draw (default 1).
Returns
-------
rvs : ndarray or scalar
Random size N-dimensional matrices, dimension (size, dim, dim) | 625941b1d99f1b3c44c67320 |
def check_packet_record(pckt, data): <NEW_LINE> <INDENT> return (data[DOMAIN_CELL_INDEX] == pckt[DNSQR].qname) and (pckt[DNSQR].qclass == IN_CLASS) | :param pckt: sniffed DNS query packet
:param data: data line from the DNS records database
:return: boolean indicating whether the packet matches the record line | 625941b1956e5f7376d70bfc
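A hedged sketch of how this predicate might be wired into Scapy's sniffing API. DOMAIN_CELL_INDEX and IN_CLASS are not defined in the row above, so the values here (cell index 0, DNS class IN = 1) and the sample record are assumptions; the function body is repeated so the sketch runs on its own:

from scapy.all import sniff, DNSQR

DOMAIN_CELL_INDEX = 0  # assumed: the queried domain sits in the first cell of a record line
IN_CLASS = 1           # DNS class IN

def check_packet_record(pckt, data):
    return (data[DOMAIN_CELL_INDEX] == pckt[DNSQR].qname) and (pckt[DNSQR].qclass == IN_CLASS)

record = [b"example.com."]  # one assumed line from the DNS records database

def handle(pkt):
    if pkt.haslayer(DNSQR) and check_packet_record(pkt, record):
        print("query matches record:", pkt[DNSQR].qname)

# Sniff ten DNS queries and test each against the record (needs root privileges).
sniff(filter="udp port 53", prn=handle, store=False, count=10)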
def bempp_grid_from_data_set(grid_data_set): <NEW_LINE> <INDENT> from bempp.api import grid_from_element_data <NEW_LINE> return grid_from_element_data(grid_data_set.grid.vertices, grid_data_set.grid.elements, grid_data_set.grid.domain_indices) | Convert a grid data set to Bempp grid. | 625941b1627d3e7fe0d68bce |
def _build_wsdl_url_from_endpoint(self): <NEW_LINE> <INDENT> raise NotImplementedError | You need to implement this method in subclasses; each service has
a different WSDL file location | 625941b18e7ae83300e4ad53
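A hypothetical subclass illustrating the intended override; the class names and the ?wsdl convention below are assumptions for illustration, not taken from the source:

class BaseSoapService:
    def __init__(self, endpoint):
        self.endpoint = endpoint

    def _build_wsdl_url_from_endpoint(self):
        raise NotImplementedError

class WeatherService(BaseSoapService):
    # Assumed convention: this particular service publishes its WSDL at <endpoint>?wsdl.
    def _build_wsdl_url_from_endpoint(self):
        return self.endpoint + "?wsdl"

service = WeatherService("https://example.com/soap/weather")
print(service._build_wsdl_url_from_endpoint())  # https://example.com/soap/weather?wsdl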
def test_manage_snapshot_missing_volume_id(self): <NEW_LINE> <INDENT> body = {'snapshot': {'ref': 'fake_ref'}} <NEW_LINE> res = self._get_resp_post(body) <NEW_LINE> self.assertEqual(HTTPStatus.BAD_REQUEST, res.status_int) | Test correct failure when volume_id is not specified. | 625941b1099cdd3c635f09e5 |
def register(self, account, markup=0.01, **tx_arguments): <NEW_LINE> <INDENT> tx_args = self.tx_arguments(**tx_arguments) <NEW_LINE> account = Web3.toChecksumAddress(account) <NEW_LINE> tx_receipt = self.sc.registerVendor( account, int(markup * self.precision), tx_args) <NEW_LINE> tx_receipt.info() <NEW_LINE> receipt_to_log(tx_receipt, self.log) <NEW_LINE> return tx_receipt | Allows to register a vendor
@param account Vendor address
@param markup Markup which vendor will perceive from mint/redeem operations | 625941b185dfad0860c3abdd |
def finalize_install(): <NEW_LINE> <INDENT> subprocess.call(["rm", "-f", tsconfig.CONFIG_PATH + '/dnsmasq.leases']) <NEW_LINE> console_log("Updating system parameters...") <NEW_LINE> i = 1 <NEW_LINE> system_update = False <NEW_LINE> while i < 10: <NEW_LINE> <INDENT> time.sleep(20) <NEW_LINE> LOG.info("Attempt %d to update system parameters..." % i) <NEW_LINE> try: <NEW_LINE> <INDENT> if sysinv.update_clone_system('Cloned_from_' + clone_name, utils.get_controller_hostname()): <NEW_LINE> <INDENT> system_update = True <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> except Exception: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> i += 1 <NEW_LINE> <DEDENT> if not system_update: <NEW_LINE> <INDENT> LOG.error("System update failed") <NEW_LINE> raise CloneFail("System update failed") <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> output = subprocess.check_output(["finish_install_clone.sh"], stderr=subprocess.STDOUT) <NEW_LINE> LOG.info("finish_install_clone out: {}".format(output)) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> console_log("Failed to cleanup stale OpenStack resources. " "Manually delete the Volumes and Instances.") | Complete the installation | 625941b1293b9510aa2c301a |
def __init__(self): <NEW_LINE> <INDENT> self._contents = [[Color.Blank for _ in range(Board.SIZE)] for _ in range(Board.SIZE)] | Initialize a blank Board, of size Board.SIZE x Board.SIZE | 625941b17c178a314d6ef1d7 |
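A minimal self-contained sketch of what this constructor presupposes — a Color enum with a Blank member and a class-level SIZE; both are assumptions here, since the row does not show them:

from enum import Enum

class Color(Enum):
    Blank = 0
    Black = 1
    White = 2

class Board:
    SIZE = 8  # assumed dimension; the source does not show the real value

    def __init__(self):
        # SIZE x SIZE grid with every cell starting out Blank.
        self._contents = [[Color.Blank for _ in range(Board.SIZE)]
                          for _ in range(Board.SIZE)]

board = Board()
assert all(cell is Color.Blank for row in board._contents for cell in row)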
def init(args): <NEW_LINE> <INDENT> wm = create_watermark(get_correct_wm(args, __name__.split('.')[-1])) <NEW_LINE> return Lsb(args.bands, args.dest_dir, args.format, wm, args.suffix, args.position) | Returns initialized Lsb (writer) object from arguments passed from
the command line. | 625941b1b5575c28eb68dd7d
def scanfourier(original_image, minimum_intensity_threshold, size_of_scan_box, ring_threshold, rastering_interval, image_crop_factor): <NEW_LINE> <INDENT> cropped_center, minimum_peak_separation_distance, pixel_distances = setup_fourier_scan(image_crop_factor, size_of_scan_box) <NEW_LINE> num_x_rasters = int((original_image.shape[0] - size_of_scan_box) / rastering_interval) <NEW_LINE> num_y_rasters = int((original_image.shape[1] - size_of_scan_box) / rastering_interval) <NEW_LINE> results_array = np.zeros((num_x_rasters, num_y_rasters)) <NEW_LINE> for x_bin in range(num_x_rasters): <NEW_LINE> <INDENT> for y_bin in range(num_y_rasters): <NEW_LINE> <INDENT> ft_of_subimage = get_ft_of_subimage(image_crop_factor, original_image, rastering_interval, size_of_scan_box, x_bin, y_bin) <NEW_LINE> with np.errstate(divide='ignore'): <NEW_LINE> <INDENT> ft_of_subimage = np.log10(ft_of_subimage) <NEW_LINE> <DEDENT> maxima = find_maxima_in_image(ft_of_subimage, minimum_intensity_threshold, minimum_peak_separation_distance) <NEW_LINE> crystal_type = classify_image_region(ft_of_subimage, maxima, ring_threshold, pixel_distances, cropped_center) <NEW_LINE> results_array[x_bin, y_bin] = crystal_type <NEW_LINE> <DEDENT> <DEDENT> return results_array | The main loop. Raster over an image and determine whether the subsections are crystalline.
:param original_image: The original image to process
:param minimum_intensity_threshold: The minimum intensity of a peak in fourier transform
:param size_of_scan_box: The size of the region which is Fourier transformed
:param ring_threshold: The threshold defining the boundary between crystal and liquid regions
:param rastering_interval: The interval in pixels over which to raster the Fourier transform
:param image_crop_factor: A float giving the proportion of the image to cut off from each side
:return: a 2D array of crystal-type classifications, one entry per rastered sub-region | 625941b150812a4eaa59c0ab
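A hedged usage sketch for the rastering loop above. The parameter values are illustrative guesses to be tuned per dataset, and the image loading uses scikit-image, which the original may not:

import numpy as np
from skimage import color, io

# Load an RGB micrograph and convert it to a 2-D grayscale array.
image = color.rgb2gray(io.imread("micrograph.png"))

results = scanfourier(
    image,
    minimum_intensity_threshold=3.0,  # min log10 peak intensity in the FT (guess)
    size_of_scan_box=64,              # side length of each FT window, in pixels
    ring_threshold=0.5,               # crystal/liquid decision boundary (guess)
    rastering_interval=16,            # step between windows, in pixels
    image_crop_factor=0.1,            # fraction of the FT cropped from each side
)

# One crystal-type classification per rastered sub-region.
print(results.shape, np.unique(results))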
def start(self): <NEW_LINE> <INDENT> raise NotImplementedError | Start the task | 625941b1adb09d7d5db6c51c
def get_clusters_at(self, level = 0): <NEW_LINE> <INDENT> self.construct_name(force = False) <NEW_LINE> if not hasattr(self,'clusters'): <NEW_LINE> <INDENT> self.clusters = xr.open_dataarray(self.filepath) <NEW_LINE> <DEDENT> if level is not None: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return(self.clusters.sel(dissim_threshold = level, method = 'nearest', tolerance = 1e-7)) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> raise KeyError('desired level not present in the loaded clustering dataset') | Method to get the georeferenced cluster array at a certain dissimilarity level
If level is None, the cluster dataset is loaded but no clusters are returned.
NOTE: Because NAs are probably present, xarray will convert the integer clustids to the float32 dtype. | 625941b145492302aab5e043
def _unstar_repo(owner, repo): <NEW_LINE> <INDENT> api_result = _send_delete_request('https://api.github.com/user/starred/' + owner + '/' + repo, CREDENTIAL) <NEW_LINE> return api_result | You can execute the command below in your terminal to test this (the prompt symbol '$' is not part of the command):
$ curl -i -u "mynameisny" -X DELETE -H "Content-Length: 0" "https://api.github.com/user/starred/ningyu/demo" | 625941b131939e2706e4cbfa |
def which(program): <NEW_LINE> <INDENT> def _is_exe(fpath): <NEW_LINE> <INDENT> return os.path.isfile(fpath) and os.access(fpath, os.X_OK) <NEW_LINE> <DEDENT> fpath, fname = os.path.split(program) <NEW_LINE> if fpath and _is_exe(program): <NEW_LINE> <INDENT> return program <NEW_LINE> <DEDENT> for path in os.environ['PATH'].split(os.pathsep): <NEW_LINE> <INDENT> path = path.strip('"') <NEW_LINE> exe_file = os.path.join(path, program) <NEW_LINE> if _is_exe(exe_file): <NEW_LINE> <INDENT> return exe_file <NEW_LINE> <DEDENT> <DEDENT> return None | Get the path of an executable program in the $PATH
environment variable
:param program: str Name of the executable
:return: str Full path to the executable in $PATH or
None if not found | 625941b16fece00bbac2d4bb |
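Usage is straightforward; a short sketch (note that shutil.which in the standard library does the same job on Python 3.3+):

path = which("python3")
if path is None:
    print("python3 not found on $PATH")
else:
    print("python3 lives at", path)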
def fontHeight(self, font=None): <NEW_LINE> <INDENT> portsaver = _PortSaver(self) <NEW_LINE> self._prepareToDraw() <NEW_LINE> if font: self._setFont(font) <NEW_LINE> fontinfo = Qd.GetFontInfo() <NEW_LINE> return fontinfo[0] + fontinfo[1] + fontinfo[3] | Find the line height of the given font. | 625941b1d8ef3951e32432be |