Dataset columns: Unnamed: 0 (int64, 0 to ~389k), code (string, 26 to ~79.6k characters), docstring (string, 1 to ~46.9k characters).
500
def get_python_shell():
    env = os.environ
    shell = "shell"
    program = os.path.basename(env["_"])
    if "jupyter-notebook" in program:
        shell = "jupyter-notebook"
    elif "JPY_PARENT_PID" in env or "ipython" in program:
        shell = "ipython"
        if "JPY_PARENT_PID" in env:
            shell = "ipython-notebook"
    return shell
Determine the Python shell.

get_python_shell() returns:
'shell' (started python on command line using "python")
'ipython' (started ipython on command line using "ipython")
'ipython-notebook' (e.g., running in Spyder or started with "ipython qtconsole")
'jupyter-notebook' (running in a Jupyter notebook)

See also https://stackoverflow.com/a/37661854
501
def snakescan(xi, yi, xf, yf):
    dx = 1 if xf >= xi else -1
    dy = 1 if yf >= yi else -1
    x, xa, xb = xi, xi, xf
    for y in range(yi, yf + dy, dy):
        for x in range(xa, xb + dx, dx):
            yield x, y
        if x == xa or x == xb:
            dx *= -1
            xa, xb = xb, xa
Scan pixels in a snake pattern along the x-coordinate then y-coordinate :param xi: Initial x-coordinate :type xi: int :param yi: Initial y-coordinate :type yi: int :param xf: Final x-coordinate :type xf: int :param yf: Final y-coordinate :type yf: int :returns: Coordinate generator :rtype: function
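A brief usage sketch of the generator above: scanning a 3x2 grid from (0, 0) to (2, 1) visits each row in alternating x-direction.

    coords = list(snakescan(0, 0, 2, 1))
    print(coords)
    # [(0, 0), (1, 0), (2, 0), (2, 1), (1, 1), (0, 1)]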
502
def _rectify_countdown_or_bool(count_or_bool):
    if count_or_bool is True or count_or_bool is False:
        count_or_bool_ = count_or_bool
    elif isinstance(count_or_bool, int):
        if count_or_bool == 0:
            return 0
        elif count_or_bool > 0:
            count_or_bool_ = count_or_bool - 1
        else:
            count_or_bool_ = count_or_bool
    else:
        count_or_bool_ = False
    return count_or_bool_
Used by recursive functions to specify which level to turn a bool on in:
counting down yields True, True, ..., False; counting up yields False, False, False, ..., True.

Args:
    count_or_bool (bool or int): if positive and an integer, it will count down, otherwise it will remain the same.

Returns:
    int or bool: count_or_bool_

CommandLine:
    python -m utool.util_str --test-_rectify_countdown_or_bool

Example:
    >>> from ubelt.util_format import _rectify_countdown_or_bool  # NOQA
    >>> count_or_bool = True
    >>> a1 = (_rectify_countdown_or_bool(2))
    >>> a2 = (_rectify_countdown_or_bool(1))
    >>> a3 = (_rectify_countdown_or_bool(0))
    >>> a4 = (_rectify_countdown_or_bool(-1))
    >>> a5 = (_rectify_countdown_or_bool(-2))
    >>> a6 = (_rectify_countdown_or_bool(True))
    >>> a7 = (_rectify_countdown_or_bool(False))
    >>> a8 = (_rectify_countdown_or_bool(None))
    >>> result = [a1, a2, a3, a4, a5, a6, a7, a8]
    >>> print(result)
    [1, 0, 0, -1, -2, True, False, False]
503
def _verify(self, valid_subscriptions, fix):
    # message literals reconstructed from context; originals lost in extraction
    if self.subscription not in valid_subscriptions:
        if fix:
            logger.debug("RosterItem.from_xml: got unknown 'subscription':"
                         " {0!r}, changing to None".format(self.subscription))
            self.subscription = None
        else:
            raise ValueError("Bad 'subscription'")
    if self.ask not in (None, u"subscribe"):
        if fix:
            logger.debug("RosterItem.from_xml: got unknown 'ask':"
                         " {0!r}, changing to None".format(self.ask))
            self.ask = None
        else:
            raise ValueError("Bad 'ask'")
Check if `self` is a valid roster item. A valid item must have a proper `subscription` and a valid value for 'ask'.

:Parameters:
    - `valid_subscriptions`: sequence of valid subscription values
    - `fix`: if `True` then replace invalid 'subscription' and 'ask' values with the defaults
:Types:
    - `fix`: `bool`

:Raise: `ValueError` if the item is invalid.
504
def open(self):
    if self._is_open:
        raise HIDException("Failed to open device: HIDDevice already open")
    path = self.path.encode()
    dev = hidapi.hid_open_path(path)
    if dev:
        self._is_open = True
        self._device = dev
    else:
        raise HIDException("Failed to open device")
Open the HID device for reading and writing.
505
def _build_str_from_chinese(chinese_items):
    year, month, day = chinese_items
    year = reduce(lambda a, b: a*10+b, map(CHINESE_NUMS.find, year))
    # The format-string literal was lost in extraction; '%d-%s-%s' is a guess
    # consistent with "standard format string" in the docstring.
    return '%d-%s-%s' % (year, _parse_chinese_field(month), _parse_chinese_field(day))
Return the corresponding standard-format date string based on the key fields parsed from a Chinese date string.
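The digit-folding step above is easier to see in isolation. A minimal sketch, assuming CHINESE_NUMS is the digit lookup string '〇一二三四五六七八九' (so str.find returns each character's numeric value):

    from functools import reduce

    CHINESE_NUMS = '〇一二三四五六七八九'

    year = '二〇一九'
    value = reduce(lambda a, b: a * 10 + b, map(CHINESE_NUMS.find, year))
    print(value)  # 2019, i.e. ((2*10 + 0)*10 + 1)*10 + 9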
506
def delete_workspace_config(namespace, workspace, cnamespace, config):
    uri = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(
        namespace, workspace, cnamespace, config)
    return __delete(uri)
Delete method configuration in workspace.

Args:
    namespace (str): project to which workspace belongs
    workspace (str): Workspace name
    cnamespace (str): Method configuration namespace
    config (str): Method configuration name

Swagger:
    https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
507
def print_row(self, **kwargs):
    meta_string = ""
    for key in self.column_names:
        float_specifier = ""
        if isinstance(kwargs[key], float):
            float_specifier = ".4f"  # precision literal lost in extraction; assumed
        meta_string += " {%s:<{width}%s}|" % (key, float_specifier)
    kwargs["width"] = self.column_width - 1
    print(meta_string.format(**kwargs))
    print(self.hr)
keys of kwargs must be the names passed to __init__(...) as `column_names`
508
def spcol(x, knots, spline_order):
    colmat = np.nan*np.ones((len(x), len(knots) - spline_order - 1))
    for i in range(0, len(knots) - spline_order - 1):
        colmat[:, i] = spline(x, knots, spline_order, i)
    return colmat
Computes the spline collocation matrix for knots in x. The spline collocation matrix contains all m-p-1 bases defined by knots. Specifically it contains the ith basis in the ith column.

Input:
    x: vector to evaluate the bases on
    knots: vector of knots
    spline_order: order of the spline

Output:
    colmat: m x m-p matrix. The collocation matrix has size m x m-p, where m denotes the number of points the basis is evaluated on and p is the spline order. The columns contain the ith basis of knots evaluated on x.
509
def _rebuild_all_command_chains(self):
    self._commands_by_name = {}
    for command in self._commands:
        self._build_command_chain(command)
Rebuilds the execution chain for all registered commands. This method is typically called when interceptors are changed. Because of that, it is more efficient to register interceptors before registering commands (typically this is done in abstract classes). The performance penalty is incurred only once, at creation time.
510
def to_html(ds: Any) -> str:
    rm = min(10, ds.shape[0])
    cm = min(10, ds.shape[1])
    html = "<p>"
    if ds.attrs.__contains__("title"):
        html += "<strong>" + ds.attrs["title"] + "</strong> "
    html += f"{ds.shape[0]} rows, {ds.shape[1]} columns, {len(ds.layers)} layer{'s' if len(ds.layers) > 1 else ''}<br/>(showing up to 10x10)<br/>"
    html += ds.filename + "<br/>"
    for (name, val) in ds.attrs.items():
        html += f"{name}: <em>{val}</em><br/>"
    html += "<table>"
    # Column attributes
    for ca in ds.col_attrs.keys():
        html += "<tr>"
        for ra in ds.row_attrs.keys():
            html += "<td>&nbsp;</td>"
        html += "<td><strong>" + ca + "</strong></td>"
        for v in ds.col_attrs[ca][:cm]:
            html += "<td>" + str(v) + "</td>"
        if ds.shape[1] > cm:
            html += "<td>...</td>"
        html += "</tr>"
    # Row attribute names
    html += "<tr>"
    for ra in ds.row_attrs.keys():
        html += "<td><strong>" + ra + "</strong></td>"
    html += "<td>&nbsp;</td>"
    for v in range(cm):
        html += "<td>&nbsp;</td>"
    if ds.shape[1] > cm:
        html += "<td>...</td>"
    html += "</tr>"
    # Data rows
    for row in range(rm):
        html += "<tr>"
        for ra in ds.row_attrs.keys():
            html += "<td>" + str(ds.row_attrs[ra][row]) + "</td>"
        html += "<td>&nbsp;</td>"
        for v in ds[row, :cm]:
            html += "<td>" + str(v) + "</td>"
        if ds.shape[1] > cm:
            html += "<td>...</td>"
        html += "</tr>"
    if ds.shape[0] > rm:
        html += "<tr>"
        for v in range(rm + 1 + len(ds.row_attrs.keys())):
            html += "<td>...</td>"
        if ds.shape[1] > cm:
            html += "<td>...</td>"
        html += "</tr>"
    html += "</table>"
    return html
Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner.
511
def _clopper_pearson_confidence_interval(samples, error_rate):
    if optimize is None or stats is None:
        raise ValueError(
            "Scipy is required for computing Clopper-Pearson confidence intervals")
    if len(samples.shape) != 1:
        raise ValueError("Batch semantics not implemented")
    n = len(samples)
    low = np.amin(samples)
    high = np.amax(samples)
    successes = np.count_nonzero(samples - low)
    failures = np.count_nonzero(samples - high)
    if successes + failures != n:
        uniques = np.unique(samples)
        msg = ("Purportedly Bernoulli distribution had distinct samples"
               " {}, {}, and {}".format(uniques[0], uniques[1], uniques[2]))
        raise ValueError(msg)

    def p_small_enough(p):
        prob = stats.binom.logcdf(successes, n, p)
        return prob - np.log(error_rate / 2.)

    def p_big_enough(p):
        prob = stats.binom.logsf(successes, n, p)
        return prob - np.log(error_rate / 2.)

    high_p = optimize.brentq(
        p_small_enough, float(successes) / n, 1., rtol=1e-9)
    low_p = optimize.brentq(
        p_big_enough, 0., float(successes) / n, rtol=1e-9)
    low_interval = low + (high - low) * low_p
    high_interval = low + (high - low) * high_p
    return (low_interval, high_interval)
Computes a confidence interval for the mean of the given 1-D distribution. Assumes (and checks) that the given distribution is Bernoulli, i.e., takes only two values. This licenses using the CDF of the binomial distribution for the confidence, which is tighter (for extreme probabilities) than the DKWM inequality. The method is known as the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Assumes: - The given samples were drawn iid from the distribution of interest. - The given distribution is a Bernoulli, i.e., supported only on low and high. Guarantees: - The probability (over the randomness of drawing the given sample) that the true mean is outside the returned interval is no more than the given error_rate. Args: samples: `np.ndarray` of samples drawn iid from the distribution of interest. error_rate: Python `float` admissible rate of mistakes. Returns: low: Lower bound of confidence interval. high: Upper bound of confidence interval. Raises: ValueError: If `samples` has rank other than 1 (batch semantics are not implemented), or if `samples` contains values other than `low` or `high` (as that makes the distribution not Bernoulli).
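A quick usage sketch, assuming scipy is available and the function above is in scope; the returned interval should comfortably cover the true mean of a Bernoulli sample:

    import numpy as np

    # 1000 draws from a Bernoulli(0.3) distribution, encoded as 0.0/1.0
    rng = np.random.RandomState(42)
    samples = (rng.uniform(size=1000) < 0.3).astype(np.float64)

    low, high = _clopper_pearson_confidence_interval(samples, error_rate=0.05)
    print(low, high)  # an interval around ~0.3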
512
def get_version_path(self, version=None):
    version = _process_version(self, version)
    if self.versioned:
        return fs.path.join(self.archive_path, str(version))
    else:
        return self.archive_path
Returns a storage path for the archive and version If the archive is versioned, the version number is used as the file path and the archive path is the directory. If not, the archive path is used as the file path. Parameters ---------- version : str or object Version number to use as file name on versioned archives (default latest unless ``default_version`` set) Examples -------- .. code-block:: python >>> arch = DataArchive(None, 'arch', None, 'a1', versioned=False) >>> print(arch.get_version_path()) a1 >>> >>> ver = DataArchive(None, 'ver', None, 'a2', versioned=True) >>> print(ver.get_version_path('0.0.0')) a2/0.0 >>> >>> print(ver.get_version_path('0.0.1a1')) a2/0.0.1a1 >>> >>> print(ver.get_version_path('latest')) # doctest: +ELLIPSIS Traceback (most recent call last): ... AttributeError: 'NoneType' object has no attribute 'manager'
513
def results(self, use_cache=True, dialect=None, billing_tier=None):
    if not use_cache or (self._results is None):
        self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
    return self._results.results
Retrieves table of results for the query. May block if the query must be executed first. Args: use_cache: whether to use cached results or not. Ignored if append is specified. dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryResultsTable containing the result set. Raises: Exception if the query could not be executed or query response was malformed.
514
def identify(self, req, resp, resource, uri_kwargs):
    header = req.get_header("Authorization", False)
    auth = header.split(" ") if header else None
    if auth is None or auth[0].lower() != "basic":
        return None
    if len(auth) != 2:
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "The Authorization header for Basic auth should be in form:\n"
            "Authorization: Basic <base64-user-pass>"
        )
    user_pass = auth[1]
    try:
        decoded = base64.b64decode(user_pass).decode()
    except (TypeError, UnicodeDecodeError, binascii.Error):
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "Credentials for Basic auth not correctly base64 encoded."
        )
    username, _, password = decoded.partition(":")
    return username, password
Identify user using Authenticate header with Basic auth.
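For reference, a minimal sketch of building the header this identifier expects; this is standard HTTP Basic auth, not specific to this library:

    import base64

    credentials = base64.b64encode(b'alice:s3cret').decode()
    header = 'Basic ' + credentials
    print(header)  # Basic YWxpY2U6czNjcmV0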
515
def update(self, fields=None, **kwargs):
    kwargs = kwargs.copy()
    kwargs.update(self._server_config.get_client_kwargs())
    headers = kwargs.pop('headers', {})
    headers['content-type'] = 'application/json'  # header name/value assumed; literals lost in extraction
    kwargs['headers'] = headers
    return client.put(
        self.path(),
        fields,
        **kwargs
    )
Update the current entity. Make an HTTP PUT call to ``self.path('base')``. Return the response. :param fields: An iterable of field names. Only the fields named in this iterable will be updated. No fields are updated if an empty iterable is passed in. All fields are updated if ``None`` is passed in. :return: A ``requests.response`` object.
516
def lowdata_fmt():
    if cherrypy.request.method.upper() != 'POST':
        return
    data = cherrypy.request.unserialized_data
    if data and isinstance(data, collections.Mapping):
        # ensure the 'arg' param is a list (key name assumed; literal lost in extraction)
        if 'arg' in data and not isinstance(data['arg'], list):
            data['arg'] = [data['arg']]
        cherrypy.request.lowstate = [data]
    else:
        cherrypy.serving.request.lowstate = data
Validate and format lowdata from incoming unserialized request data This tool requires that the hypermedia_in tool has already been run.
517
def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT):
    params = {}
    if version is not None:
        params['version'] = version
    # path template reconstructed from the docstring's "GET: /{mount_point}/data/{path}"
    api_path = '/v1/{mount_point}/data/{path}'.format(mount_point=mount_point, path=path)
    response = self._adapter.get(
        url=api_path,
        params=params,
    )
    return response.json()
Retrieve the secret at the specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies the path of the secret to read. This is specified as part of the URL. :type path: str | unicode :param version: Specifies the version to return. If not set the latest version is returned. :type version: int :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: dict
518
def get_segment_definer_comments(xml_file, include_version=True):
    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    xmldoc, _ = ligolw_utils.load_fileobj(xml_file,
                                          gz=xml_file.name.endswith(".gz"),
                                          contenthandler=h)
    seg_def_table = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)

    comment_dict = {}
    for seg_def in seg_def_table:
        # ':' separator assumed (standard channel naming); literal lost in extraction
        if include_version:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name),
                                          str(seg_def.version)])
        else:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name)])
        comment_dict[full_channel_name] = seg_def.comment
    return comment_dict
Returns a dict with the comment column as the value for each segment
519
def create_token(self, user):
    h = crypto.pbkdf2(
        self.get_revocation_key(user),
        self.salt,
        self.iterations,
        digest=self.digest,
    )
    return self.sign(self.packer.pack_pk(user.pk) + h)
Create a signed token from a user.
520
def __within2(value, within=None, errmsg=None, dtype=None):
    valid, _value = False, value
    if dtype:
        try:
            _value = dtype(value)
            valid = _value in within
        except ValueError:
            pass
    else:
        valid = _value in within
    if errmsg is None:
        if dtype:
            typename = getattr(dtype, '__name__',
                               hasattr(dtype, '__class__')
                               and getattr(dtype.__class__, '__name__', dtype))
            # message literals reconstructed; originals lost in extraction
            errmsg = 'valid {0}, must be within {1}'.format(typename, within)
        else:
            errmsg = 'must be within {0}'.format(within)
    return (valid, _value, errmsg)
validate that a value is in ``within`` and optionally a ``dtype``
521
def all_selected_options(self):
    ret = []
    for opt in self.options:
        if opt.is_selected():
            ret.append(opt)
    return ret
Returns a list of all selected options belonging to this select tag
522
def pull_session(session_id=None, url='default', io_loop=None, arguments=None):
    coords = _SessionCoordinates(session_id=session_id, url=url)
    session = ClientSession(session_id=session_id,
                            websocket_url=websocket_url_for_server_url(coords.url),
                            io_loop=io_loop,
                            arguments=arguments)
    session.pull()
    return session
Create a session by loading the current server-side document. ``session.document`` will be a fresh document loaded from the server. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. If you don't plan to modify ``session.document`` you probably don't need to use this function; instead you can directly ``show_session()`` or ``server_session()`` without downloading the session's document into your process first. It's much more efficient to avoid downloading the session if you don't need to. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``pull_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: session_id (string, optional) : The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop (``tornado.ioloop.IOLoop``, optional) : The ``IOLoop`` to use for the websocket arguments (dict[str, str], optional) : A dictionary of key/values to be passed as HTTP request arguments to Bokeh application code (default: None) Note that should only be provided when pulling new sessions. If ``session_id`` is not None, or a session with ``session_id`` already exists, these arguments will have no effect. Returns: ClientSession : A new ``ClientSession`` connected to the server
523
def configure(self, options, conf):
    self.conf = conf
    if hasattr(options, self.enable_opt):
        self.enabled = getattr(options, self.enable_opt)
Configure the plugin and system, based on selected options. The base plugin class sets the plugin to enabled if the enable option for the plugin (self.enable_opt) is true.
524
def make_tz_aware(dt, tz='UTC', is_dst=None):
    # default tz and the 'DT' suffix check are inferred from the doctests
    dt = make_datetime(dt)
    if not isinstance(dt, (list, datetime.datetime, datetime.date, datetime.time, pd.Timestamp)):
        return dt
    try:
        tz = dt.tzinfo or tz
    except (ValueError, AttributeError, TypeError):
        pass
    try:
        tzstr = str(tz).strip().upper()
        if tzstr in TZ_ABBREV_NAME:
            is_dst = is_dst or tzstr.endswith('DT')
            tz = TZ_ABBREV_NAME.get(tzstr, tz)
    except (ValueError, AttributeError, TypeError):
        pass
    try:
        tz = pytz.timezone(tz)
    except (ValueError, AttributeError, TypeError):
        pass
    try:
        return tz.localize(dt, is_dst=is_dst)
    except (ValueError, AttributeError, TypeError):
        pass
    if not isinstance(dt, list):
        return dt.replace(tzinfo=tz)
    return [make_tz_aware(dt0, tz=tz, is_dst=is_dst) for dt0 in dt]
Add timezone information to a datetime object, only if it is naive. >>> make_tz_aware(datetime.datetime(2001, 9, 8, 7, 6)) datetime.datetime(2001, 9, 8, 7, 6, tzinfo=<UTC>) >>> make_tz_aware(['2010-01-01'], 'PST') [datetime.datetime(2010, 1, 1, 0, 0, tzinfo=<DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD>)] >>> make_tz_aware(['1970-10-31', '1970-12-25', '1971-07-04'], 'CDT') # doctest: +NORMALIZE_WHITESPACE [datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware([None, float('nan'), float('inf'), 1980, 1979.25*365.25, '1970-10-31', ... '1970-12-25', '1971-07-04'], ... 'CDT') # doctest: +NORMALIZE_WHITESPACE [None, nan, inf, datetime.datetime(6, 6, 3, 0, 0, tzinfo=<DstTzInfo 'US/Central' LMT-1 day, 18:09:00 STD>), datetime.datetime(1980, 4, 16, 1, 30, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware(datetime.time(22, 23, 59, 123456)) datetime.time(22, 23, 59, 123456, tzinfo=<UTC>) >>> make_tz_aware(datetime.time(22, 23, 59, 123456), 'PDT', is_dst=True) datetime.time(22, 23, 59, 123456, tzinfo=<DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>)
525
def _domain_event_pmsuspend_cb(conn, domain, reason, opaque):
    # event key and payload literals assumed; originals lost in extraction
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
        'reason': 'memory'
    })
Domain suspend events handler
526
def _get_subject_public_key(cert):
    public_key = cert.get_pubkey()
    cryptographic_key = public_key.to_cryptography_key()
    subject_public_key = cryptographic_key.public_bytes(Encoding.DER, PublicFormat.PKCS1)
    return subject_public_key
Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo field of the server's certificate. This is used in the server verification steps to thwart MitM attacks. :param cert: X509 certificate from pyOpenSSL .get_peer_certificate() :return: byte string of the asn.1 DER encoded SubjectPublicKey field
527
def nunique(expr):
    output_type = types.int64
    if isinstance(expr, SequenceExpr):
        return NUnique(_value_type=output_type, _inputs=[expr])
    elif isinstance(expr, SequenceGroupBy):
        return GroupedNUnique(_data_type=output_type, _inputs=[expr.to_column()],
                              _grouped=expr.input)
    elif isinstance(expr, CollectionExpr):
        unique_input = _extract_unique_input(expr)
        if unique_input:
            return nunique(unique_input)
        else:
            return NUnique(_value_type=types.int64, _inputs=expr._project_fields)
    elif isinstance(expr, GroupBy):
        if expr._to_agg:
            inputs = expr.input[expr._to_agg.names]._project_fields
        else:
            inputs = expr.input._project_fields
        return GroupedNUnique(_data_type=types.int64, _inputs=inputs, _grouped=expr)
The distinct count. :param expr: :return:
528
def get_child(self, streamId, childId, options={}):
    # URL path segments lost in extraction; '/streams/…/children/…' is assumed
    return self.get('/streams/' + streamId + '/children/' + childId, options)
Get the child of a stream.
529
def get_top_n_meanings(strings, n):
    scored_strings = [(s, score_meaning(s)) for s in strings]
    scored_strings.sort(key=lambda tup: -tup[1])
    return scored_strings[:n]
Returns (text, score) for top n strings
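A short usage sketch; score_meaning is whatever scoring function the module defines, so a toy word-count stand-in is used here:

    def score_meaning(text):
        # toy stand-in: longer strings score higher
        return len(text.split())

    print(get_top_n_meanings(['a b c', 'a', 'a b'], n=2))
    # [('a b c', 3), ('a b', 2)]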
530
def get_resource_metadata(self, resource=None):
    # metadata type and key name follow RETS conventions; literals lost in extraction
    result = self._make_metadata_request(meta_id=0, metadata_type='METADATA-RESOURCE')
    if resource:
        result = next((item for item in result if item['ResourceID'] == resource), None)
    return result
Get resource metadata :param resource: The name of the resource to get metadata for :return: list
531
def screener(molecules, ensemble, sort_order):
    # the RDKit property names below ('best_score', 'best_query', 'active')
    # are placeholders; the original literals were lost in extraction
    modified_molecules = []
    for index in range(len(molecules)):
        modified_molecules.append(molecules[index])
        scores = []
        for query in ensemble:
            scores.append((molecules[index].GetProp(query), query))
        if sort_order == 'dsc':
            scores.sort(key=lambda x: float(x[0]), reverse=True)
        elif sort_order == 'asc':
            scores.sort(key=lambda x: float(x[0]))
        modified_molecules[index].SetProp('best_score', format(scores[0][0]))
        modified_molecules[index].SetProp('best_query', format(scores[0][1]))

    active = []
    decoy = []
    non_random = []
    for mol in modified_molecules:
        if float(mol.GetProp('best_score')) == 10000.00:
            if mol.GetProp('active') == 1:
                active.append(mol)
            else:
                decoy.append(mol)
        else:
            non_random.append(mol)
    if sort_order == 'dsc':
        non_random.sort(key=lambda mol: float(mol.GetProp('best_score')), reverse=True)
    elif sort_order == 'asc':
        non_random.sort(key=lambda mol: float(mol.GetProp('best_score')))

    rand = []
    decoy_length = len(decoy)
    active_length = len(active)
    if decoy_length > active_length:
        for a, d in zip(active, decoy[0:active_length]):
            rand.append(a)
            rand.append(d)
        for d in decoy[active_length:decoy_length]:
            rand.append(d)
    elif decoy_length < active_length:
        for a, d in zip(active[0:decoy_length], decoy):
            rand.append(a)
            rand.append(d)
        for a in active[decoy_length:active_length]:
            rand.append(a)
    elif decoy_length == active_length:
        for a, d in zip(active, decoy):
            rand.append(a)
            rand.append(d)
    modified_molecules = non_random + rand
    return modified_molecules
Uses the virtual screening scores for the receptors, or queries, specified in ensemble to sort the molecules in molecules in the direction specified by sort_order.

:param molecules: a list of molecule objects (/classification/molecules.Molecules())
:param ensemble: a tuple with receptors, or a query, that specifies an ensemble
:param sort_order: 'asc' or 'dsc'. 'asc' sorts in ascending order (binding energy estimates); 'dsc' sorts in descending order (similarity scores, or binding probabilities)
:return:
532
def get_ssh_key():
    path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
                          os.path.expanduser("~/ray_bootstrap_key.pem"))
    if os.path.exists(path):
        return path
    return None
Returns ssh key to connecting to cluster workers. If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key will be used for syncing across different nodes.
533
def dump_privatekey(type, pkey, cipher=None, passphrase=None):
    bio = _new_mem_buf()

    if not isinstance(pkey, PKey):
        raise TypeError("pkey must be a PKey")

    if cipher is not None:
        if passphrase is None:
            raise TypeError(
                "if a value is given for cipher "
                "one must also be given for passphrase")
        cipher_obj = _lib.EVP_get_cipherbyname(_byte_string(cipher))
        if cipher_obj == _ffi.NULL:
            raise ValueError("Invalid cipher name")
    else:
        cipher_obj = _ffi.NULL

    helper = _PassphraseHelper(type, passphrase)
    if type == FILETYPE_PEM:
        result_code = _lib.PEM_write_bio_PrivateKey(
            bio, pkey._pkey, cipher_obj, _ffi.NULL, 0,
            helper.callback, helper.callback_args)
        helper.raise_if_problem()
    elif type == FILETYPE_ASN1:
        result_code = _lib.i2d_PrivateKey_bio(bio, pkey._pkey)
    elif type == FILETYPE_TEXT:
        if _lib.EVP_PKEY_id(pkey._pkey) != _lib.EVP_PKEY_RSA:
            raise TypeError("Only RSA keys are supported for FILETYPE_TEXT")

        rsa = _ffi.gc(
            _lib.EVP_PKEY_get1_RSA(pkey._pkey),
            _lib.RSA_free
        )
        result_code = _lib.RSA_print(bio, rsa, 0)
    else:
        raise ValueError(
            "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or "
            "FILETYPE_TEXT")

    _openssl_assert(result_code != 0)

    return _bio_to_string(bio)
Dump the private key *pkey* into a buffer string encoded with the type *type*. Optionally (if *type* is :const:`FILETYPE_PEM`) encrypting it using *cipher* and *passphrase*. :param type: The file type (one of :const:`FILETYPE_PEM`, :const:`FILETYPE_ASN1`, or :const:`FILETYPE_TEXT`) :param PKey pkey: The PKey to dump :param cipher: (optional) if encrypted PEM format, the cipher to use :param passphrase: (optional) if encrypted PEM format, this can be either the passphrase to use, or a callback for providing the passphrase. :return: The buffer with the dumped key in :rtype: bytes
534
def token_middleware(ctx, get_response):
    async def middleware(request):
        # dict key assumed; literal lost in extraction
        params = request.setdefault('params', {})
        if params.get("token") is None:
            params['token'] = ctx.token
        return await get_response(request)
    return middleware
Reinject token and consistency into requests.
535
def get_items(self) -> Iterator[StoryItem]:
    yield from (StoryItem(self._context, item, self.owner_profile)
                for item in reversed(self._node['items']))
Retrieve all items from a story.
536
def crawl(self, urls, name='crawl', api='analyze', **kwargs):
    # defaults and parameter-name literals assumed; originals lost in extraction
    if isinstance(urls, list):
        urls = ' '.join(urls)
    url = self.endpoint('crawl')
    process_url = self.endpoint(api)
    params = {
        'token': self._token,
        'seeds': urls,
        'name': name,
        'apiUrl': process_url,
    }
    params['maxRounds'] = 10
    params.update(kwargs)
    self._get(url, params=params)
    return Job(self._token, name, self._version)
Crawlbot API. Returns a diffbot.Job object to check and retrieve crawl status.
537
def check_uniqueness(self, *args):
    self.get_unique_index().check_uniqueness(*self.prepare_args(args, transform=False))
For a unique index, check that the given args are not used twice. For the parameters, see BaseIndex.check_uniqueness.
538
def refresh(self):
    pipe = self.redis.pipeline()
    pipe.hget(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "metadata")
    pipe.hget(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "choices")
    pipe.hget(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "default-choice")
    results = pipe.execute()

    if results[0] is None:
        raise ExperimentException(self.name, "Does not exist")

    self.metadata = parse_json(results[0])
    self.choice_names = parse_json(results[1]) if results[1] is not None else []
    self.default_choice = escape.to_unicode(results[2])
    self._choices = None
Re-pulls the data from redis
539
def perform_word_selection(self, event=None):
    self.editor.setTextCursor(
        TextHelper(self.editor).word_under_cursor(True))
    if event:
        event.accept()
Performs word selection :param event: QMouseEvent
540
def _strip_metadata(self, my_dict):
    new_dict = copy.deepcopy(my_dict)
    if const.START in new_dict:
        del new_dict[const.START]
    if const.END in new_dict:
        del new_dict[const.END]
    if const.WHITELIST in new_dict:
        del new_dict[const.WHITELIST]
    if const.WHITELIST_START in new_dict:
        del new_dict[const.WHITELIST_START]
    if const.WHITELIST_END in new_dict:
        del new_dict[const.WHITELIST_END]
    return new_dict
Create a copy of the dict and remove the metadata entries that are not needed.
541
def retrieve(self, request, project, pk=None):
    try:
        serializer = JobNoteSerializer(JobNote.objects.get(id=pk))
        return Response(serializer.data)
    except JobNote.DoesNotExist:
        return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
GET method implementation for a note detail
542
def subsc_search(self, article_code, **kwargs):
    # method name, field names, and model keys assumed; originals lost in extraction
    request = TOPRequest('taobao.vas.subsc.search')
    request['article_code'] = article_code
    for k, v in kwargs.iteritems():
        if k not in ('item_code', 'nick', 'start_deadline', 'end_deadline',
                     'status', 'autosub', 'expire_notice', 'page_no', 'page_size') and v == None:
            continue
        request[k] = v
    self.create(self.execute(request), fields=['total_item', 'article_subs'],
                models={'article_subs': ArticleSub})
    return self.article_subs
taobao.vas.subsc.search — export subscription records. Used by an ISV to query the subscription records of its own applications and billable items.
543
def raw_pitch_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
                       cent_tolerance=50):
    validate_voicing(ref_voicing, est_voicing)
    validate(ref_voicing, ref_cent, est_voicing, est_cent)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    if ref_voicing.size == 0 or est_voicing.size == 0 \
            or ref_cent.size == 0 or est_cent.size == 0:
        return 0.
    if ref_voicing.sum() == 0:
        return 0.
    matching_voicing = ref_voicing * (est_cent > 0)
    cent_diff = np.abs(ref_cent - est_cent)[matching_voicing]
    frame_correct = (cent_diff < cent_tolerance)
    raw_pitch = (frame_correct).sum()/float(ref_voicing.sum())
    return raw_pitch
Compute the raw pitch accuracy given two pitch (frequency) sequences in cents and matching voicing indicator sequences. The first pitch and voicing arrays are treated as the reference (truth), and the second two as the estimate (prediction). All 4 sequences must be of the same length. Examples -------- >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt') >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt') >>> (ref_v, ref_c, ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time, ... ref_freq, ... est_time, ... est_freq) >>> raw_pitch = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c, ... est_v, est_c) Parameters ---------- ref_voicing : np.ndarray Reference boolean voicing array ref_cent : np.ndarray Reference pitch sequence in cents est_voicing : np.ndarray Estimated boolean voicing array est_cent : np.ndarray Estimate pitch sequence in cents cent_tolerance : float Maximum absolute deviation for a cent value to be considerd correct (Default value = 50) Returns ------- raw_pitch : float Raw pitch accuracy, the fraction of voiced frames in ref_cent for which est_cent provides a correct frequency values (within cent_tolerance cents).
544
def assign(self, **kwargs):
    data = self.copy()

    if PY36:
        for k, v in kwargs.items():
            data[k] = com.apply_if_callable(v, data)
    else:
        results = OrderedDict()
        for k, v in kwargs.items():
            results[k] = com.apply_if_callable(v, data)
        results = sorted(results.items())
        for k, v in results:
            data[k] = v
    return data
r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
545
def endpoint(self, endpoint):
    def decorator(f):
        def register_endpoint(state):
            state.app.view_functions[endpoint] = f
        self.record_once(register_endpoint)
        return f
    return decorator
Like :meth:`Flask.endpoint` but for a blueprint. This does not prefix the endpoint with the blueprint name, this has to be done explicitly by the user of this method. If the endpoint is prefixed with a `.` it will be registered to the current blueprint, otherwise it's an application independent endpoint.
546
def parse_ipv6_literal_host(entity, default_port):
    if entity.find(']') == -1:
        raise ValueError("an IPv6 address literal must be "
                         "enclosed in '[' and ']' according "
                         "to RFC 2732.")
    i = entity.find(']:')
    if i == -1:
        return entity[1:-1], default_port
    return entity[1:i], entity[i + 2:]
Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where port is default_port if it wasn't specified in entity. :Parameters: - `entity`: A string that represents an IPv6 literal enclosed in braces (e.g. '[::1]' or '[::1]:27017'). - `default_port`: The port number to use when one wasn't specified in entity.
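A quick check of both branches, assuming the function above is in scope:

    print(parse_ipv6_literal_host('[::1]:27017', 27017))  # ('::1', '27017')
    print(parse_ipv6_literal_host('[::1]', 27017))        # ('::1', 27017)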
547
def delete(self, obj):
    obj = self.api.get_object(getattr(obj, 'id', obj))
    obj.delete()
    self.remove(obj.id)
Delete an object in CDSTAR and remove it from the catalog. :param obj: An object ID or an Object instance.
548
def _get_request_type(self):
    value = self.document.tag.lower()
    # params key assumed; literal lost in extraction
    if value in allowed_request_types[self.params['service']]:
        self.params["request"] = value
    else:
        raise OWSInvalidParameterValue("Request type %s is not supported" % value,
                                       value="request")
    return self.params["request"]
Find requested request type in POST request.
549
def repr_args(args):
    res = []
    for x in args:
        if isinstance(x, tuple) and len(x) == 2:
            key, value = x
            res += ["%s=%s" % (key, repr_arg(value))]
        else:
            res += [repr_arg(x)]
    return ', '.join(res)
Formats a list of function arguments prettily, but as working code (kwargs are tuples (argname, argvalue)).
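A usage sketch, standing in the built-in repr for the module's repr_arg helper:

    repr_arg = repr  # stand-in for the module's own helper

    print(repr_args([1, 'a', ('flag', True)]))
    # 1, 'a', flag=True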
550
def all(cls, klass, db_session=None):
    db_session = get_db_session(db_session)
    return db_session.query(klass)
Returns all objects of a specific type. Works correctly with SQLAlchemy inheritance models; you should normally use a model's base_query() instead of this function, which exists for backward-compatibility purposes.

:param klass:
:param db_session:
:return:
551
def check_enable_mode(self, check_string=""):
    self.write_channel(self.RETURN)
    output = self.read_until_prompt()
    return check_string in output
Check if in enable mode. Return boolean. :param check_string: Identification of privilege mode from device :type check_string: str
552
def jobs(request):
    # NOTE: several string literals (HTTP header names, log/mail messages)
    # were lost in extraction; values below are reconstructed from the
    # docstring's parameter list and from context, and should be treated
    # as assumptions.
    try:
        if request.method == 'GET':
            secret = request.GET['Secret']
            uuid = request.GET['UUID']
        elif request.method == 'POST':
            secret = request.POST['Secret']
            uuid = request.POST['UUID']
    except Exception as e:
        logger.error(
            "Error finding the neccessary data in the executor request: " + str(e))
        raise PermissionDenied
    if secret != settings.JOB_EXECUTOR_SECRET:
        raise PermissionDenied

    machine, created = TestMachine.objects.update_or_create(
        host=uuid, defaults={'last_contact': datetime.now()})
    if created:
        logger.debug(
            "Test machine is unknown, creating entry and asking executor for configuration.")
        response = HttpResponse()
        response['Action'] = 'get_config'
        response['APIVersion'] = '1.0.0'  # assumed header value
        response['MachineId'] = machine.pk
        return response

    if not machine.enabled:
        raise Http404

    if request.method == "GET":
        pending_submissions = Submission.pending_tests.filter(
            file_upload__fetched__isnull=False)
        for sub in pending_submissions:
            max_delay = timedelta(
                seconds=sub.assignment.attachment_test_timeout)
            if sub.file_upload.fetched and sub.file_upload.fetched + max_delay < datetime.now():
                logger.debug(
                    "Resetting executor fetch status for submission %u, due to timeout" % sub.pk)
                sub.clean_fetch_date()
                if sub.state == Submission.TEST_VALIDITY_PENDING:
                    sub.save_validation_result(
                        machine,
                        "Killed due to non-reaction. Please check your application for deadlocks or keyboard input.",
                        "Killed due to non-reaction on timeout signals.")
                    sub.state = Submission.TEST_VALIDITY_FAILED
                    sub.inform_student(request, sub.state)
                if sub.state == Submission.TEST_FULL_PENDING:
                    sub.save_fulltest_result(
                        machine,
                        "Killed due to non-reaction on timeout signals. Student not informed, since this was the full test.")
                    sub.state = Submission.TEST_FULL_FAILED
                sub.save()

        submissions = Submission.pending_tests
        submissions = submissions.filter(assignment__in=machine.assignments.all()) \
                                 .filter(file_upload__isnull=False) \
                                 .filter(file_upload__fetched__isnull=True)
        if len(submissions) == 0:
            raise Http404
        else:
            sub = submissions[0]
        sub.save_fetch_date()
        sub.modified = datetime.now()
        sub.save()

        f = sub.file_upload.attachment
        if not os.access(f.path, os.F_OK):
            mail_managers('Submission file missing',
                          'Missing file for upload %u (%s)' % (
                              sub.file_upload.pk, str(sub.file_upload.attachment)),
                          fail_silently=True)
            raise Http404

        response = HttpResponse(f, content_type='application/binary')
        response['Content-Disposition'] = 'attachment; filename="%s"' % sub.file_upload.basename()
        response['SubmissionFileId'] = str(sub.file_upload.pk)
        response['OriginalFileName'] = sub.file_upload.original_filename
        response['SubmissionId'] = str(sub.pk)
        response['SubmitterName'] = sub.submitter.get_full_name()
        response['SubmitterStudentId'] = sub.submitter.profile.student_id
        response['Authors'] = sub.authors.all()
        response['SubmitterStudyProgram'] = str(sub.submitter.profile.study_program)
        response['Course'] = str(sub.assignment.course)
        response['Assignment'] = str(sub.assignment)
        response['Timeout'] = sub.assignment.attachment_test_timeout
        if sub.state == Submission.TEST_VALIDITY_PENDING:
            response['Action'] = 'test_validity'
            response['PostRunValidation'] = sub.assignment.validity_test_url(request)
        elif sub.state == Submission.TEST_FULL_PENDING or sub.state == Submission.CLOSED_TEST_FULL_PENDING:
            response['Action'] = 'test_full'
            response['PostRunValidation'] = sub.assignment.full_test_url(request)
        else:
            assert (False)
        logger.debug("Delivering submission %u as new %s job" % (sub.pk, response['Action']))
        return response

    elif request.method == "POST":
        if request.POST['Action'] == 'get_config':
            machine = TestMachine.objects.get(
                pk=int(request.POST['MachineId']))
            machine.config = request.POST['Config']
            machine.save()
            return HttpResponse(status=201)

        sid = request.POST['SubmissionFileId']
        submission_file = get_object_or_404(SubmissionFile, pk=sid)
        sub = submission_file.submissions.all()[0]
        logger.debug("Storing executor results for submission %u" % (sub.pk))
        error_code = int(request.POST['ErrorCode'])
        if request.POST['Action'] == 'test_validity' and sub.state == Submission.TEST_VALIDITY_PENDING:
            # second message key assumed
            sub.save_validation_result(
                machine, request.POST['Message'], request.POST['MessageTutor'])
            if error_code == 0:
                if sub.assignment.attachment_test_full:
                    logger.debug(
                        "Validity test working, setting state to pending full test")
                    sub.state = Submission.TEST_FULL_PENDING
                else:
                    logger.debug(
                        "Validity test working, setting state to tested")
                    sub.state = Submission.SUBMITTED_TESTED
                    if not sub.assignment.is_graded():
                        sub.state = Submission.CLOSED
                        sub.inform_student(request, Submission.CLOSED)
            else:
                logger.debug(
                    "Validity test not working, setting state to failed")
                sub.state = Submission.TEST_VALIDITY_FAILED
                sub.inform_student(request, sub.state)
        elif request.POST['Action'] == 'test_full' and sub.state == Submission.TEST_FULL_PENDING:
            sub.save_fulltest_result(
                machine, request.POST['Message'])
            if error_code == 0:
                if sub.assignment.is_graded():
                    logger.debug("Full test working, setting state to tested (since graded)")
                    sub.state = Submission.SUBMITTED_TESTED
                else:
                    logger.debug("Full test working, setting state to closed (since not graded)")
                    sub.state = Submission.CLOSED
                    sub.inform_student(request, Submission.CLOSED)
            else:
                logger.debug("Full test not working, setting state to failed")
                sub.state = Submission.TEST_FULL_FAILED
        elif request.POST['Action'] == 'test_full' and sub.state == Submission.CLOSED_TEST_FULL_PENDING:
            logger.debug(
                "Closed full test done, setting state to closed again")
            sub.save_fulltest_result(
                machine, request.POST['Message'])
            sub.state = Submission.CLOSED
        elif request.POST['Action'] == 'test_validity' and sub.state == Submission.TEST_VALIDITY_FAILED:
            logger.debug(
                "Ignoring executor result, since the submission is already marked as failed.")
        else:
            msg = 'Unexpected executor result for submission %u (file %u): action %s, tutor state %s (%s), message %s, error code %u' % (
                sub.pk, submission_file.pk, request.POST['Action'],
                sub.state_for_tutors(), sub.state, request.POST['Message'], error_code)
            mail_managers('Unexpected executor result', msg, fail_silently=True)
        sub.save()
        sub.clean_fetch_date()
        return HttpResponse(status=201)
This is the view used by the executor.py scripts for getting / putting the test results.

Fetching some file for testing is changing the database, so using GET here is not really RESTish. Whatever. A visible shared secret in the request is no problem, since the executors come from trusted networks. The secret only protects this view from outside foreigners.

TODO: Make it a real API, based on some framework.
TODO: Factor out state model from this method into some model.

POST requests with 'Action'='get_config' are expected to contain the following parameters: 'MachineId', 'Config', 'Secret', 'UUID'.
All other POST requests are expected to contain the following parameters: 'SubmissionFileId', 'Message', 'ErrorCode', 'Action', 'Secret', 'UUID'.
GET requests are expected to contain the following parameters: 'Secret', 'UUID'.
GET responses deliver the following elements in the header: 'SubmissionFileId', 'Timeout', 'Action', 'PostRunValidation'.
553
def get_correlation(self, t1, t2):
    t_min = min(t1, t2)
    t_max = max(t1, t2)

    c1 = 1.0
    c1 -= np.cos(np.pi / 2.0 - np.log(t_max / max(t_min, 0.109)) * 0.366)

    if t_max < 0.2:
        c2 = 0.105 * (1.0 - 1.0 / (1.0 + np.exp(100.0 * t_max - 5.0)))
        c2 = 1.0 - c2 * (t_max - t_min) / (t_max - 0.0099)
    else:
        c2 = 0

    if t_max < 0.109:
        c3 = c2
    else:
        c3 = c1

    c4 = c1
    c4 += 0.5 * (np.sqrt(c3) - c3) * (1.0 + np.cos(np.pi * t_min / 0.109))

    if t_max <= 0.109:
        rho = c2
    elif t_min > 0.109:
        rho = c1
    elif t_max < 0.2:
        rho = min(c2, c4)
    else:
        rho = c4
    return rho
Computes the correlation coefficient for the specified periods. :param float t1: First period of interest. :param float t2: Second period of interest. :return float rho: The predicted correlation coefficient.
554
def update_devices(self, devices):
    for qspacket in devices:
        try:
            qsid = qspacket[QS_ID]
        except KeyError:
            _LOGGER.debug("Device without ID: %s", qspacket)
            continue

        if qsid not in self:
            self[qsid] = QSDev(data=qspacket)

        dev = self[qsid]
        dev.data = qspacket

        newqs = _legacy_status(qspacket[QS_VALUE])
        if dev.is_dimmer:
            newqs = min(round(math.pow(newqs, self.dim_adj)), 100)
        newin = round(newqs * _MAX / 100)

        if abs(dev.value - newin) > 1:
            _LOGGER.debug("%s qs=%s --> %s", qsid, newqs, newin)
            dev.value = newin
            self._cb_value_changed(self, qsid, newin)
Update values from response of URL_DEVICES, callback if changed.
555
def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):
    if nside_in == nside_out:
        return ipix
    elif nside_in < nside_out:
        return u_grade_ipix(ipix, nside_in, nside_out, nest)
    elif nside_in > nside_out:
        return d_grade_ipix(ipix, nside_in, nside_out, nest)
Upgrade or degrade resolution of a pixel list.

Parameters:
-----------
ipix : array-like
    the input pixel(s)
nside_in : int
    the nside of the input pixel(s)
nside_out : int
    the desired nside of the output pixel(s)
nest : bool
    if True, input and output pixels use NESTED ordering; otherwise RING

Returns:
--------
pix_out : array-like
    the upgraded or degraded pixel array
556
def reconnect(self):
    self._converters.clear()
    self._gateway = None
    self._xsltFactory = None
    try:
        self._gateway = JavaGateway(GatewayClient(port=self._gwPort))
        self._xsltFactory = self._gateway.jvm.org.pyjxslt.XSLTTransformerFactory()
        self._refresh_converters()
    except (socket.error, Py4JNetworkError) as e:
        print(e)
        self._gateway = None
        return False
    return True
(Re)establish the gateway connection @return: True if connection was established
557
def requires(self, extras=()):
    dm = self._dep_map
    deps = []
    deps.extend(dm.get(None, ()))
    for ext in extras:
        try:
            deps.extend(dm[safe_extra(ext)])
        except KeyError:
            raise UnknownExtra(
                "%s has no such extra feature %r" % (self, ext)
            )
    return deps
List of Requirements needed for this distro if `extras` are used
558
def _do_search(conf):
    # option names and messages reconstructed from Salt's pillar_ldap
    # conventions; the original literals were lost in extraction
    connargs = {}
    for name in ['server', 'port', 'tls', 'binddn', 'bindpw', 'anonymous']:
        connargs[name] = _config(name, conf)
    if connargs['binddn'] and connargs['bindpw']:
        connargs['anonymous'] = False
    try:
        _filter = conf['filter']
    except KeyError:
        raise SaltInvocationError('missing filter')
    _dn = _config('dn', conf)
    scope = _config('scope', conf)
    _lists = _config('lists', conf) or []
    _attrs = _config('attrs', conf) or []
    _dict_key_attr = _config('dict_key_attr', conf, 'dn')
    attrs = _lists + _attrs + [_dict_key_attr]
    if not attrs:
        attrs = None
    try:
        result = __salt__['ldap.search'](_filter, _dn, scope, attrs, **connargs)['results']
    except IndexError:
        log.debug('LDAP search returned no results for filter %s', _filter)
        result = {}
    except Exception:
        log.critical(
            'Failed to retrieve pillar data from LDAP', exc_info=True
        )
        return {}
    return result
Builds connection and search arguments, performs the LDAP search and formats the results as a dictionary appropriate for pillar use.
559
def yellow(cls):
    "Make the text foreground color yellow."
    wAttributes = cls._get_text_attributes()
    wAttributes &= ~win32.FOREGROUND_MASK
    wAttributes |= win32.FOREGROUND_YELLOW
    cls._set_text_attributes(wAttributes)
Make the text foreground color yellow.
560
def _started_channels(self):
    super(IPythonWidget, self)._started_channels()
    self._load_guiref_magic()
    self.kernel_manager.shell_channel.history(hist_access_type='tail', n=1000)
Reimplemented to make a history request and load %guiref.
561
def _add_study_provenance(
        self, phenotyping_center, colony, project_fullname, pipeline_name,
        pipeline_stable_id, procedure_stable_id, procedure_name,
        parameter_stable_id, parameter_name, statistical_method,
        resource_name, row_num):
    # blank-node prefixes and global translation-table keys below are
    # reconstructed; the original literals were lost in extraction
    provenance_model = Provenance(self.graph)
    model = Model(self.graph)

    study_bnode = self.make_id("{0}{1}{2}{3}{4}{5}{6}{7}".format(
        phenotyping_center, colony, project_fullname, pipeline_stable_id,
        procedure_stable_id, parameter_stable_id, statistical_method,
        resource_name), '_')
    model.addIndividualToGraph(study_bnode, None, self.globaltt['study'])

    study_parts = []
    model.addIndividualToGraph(self.resolve(procedure_stable_id), procedure_name)
    study_parts.append(self.resolve(procedure_stable_id))
    study_parts.append(self.resolve(statistical_method))
    provenance_model.add_study_parts(study_bnode, study_parts)

    parameter_label = "{0} ({1})".format(parameter_name, procedure_name)
    logging.info("Adding Provenance")
    model.addIndividualToGraph(
        self.resolve(parameter_stable_id), parameter_label)
    provenance_model.add_study_measure(
        study_bnode, self.resolve(parameter_stable_id))

    colony_bnode = self.make_id("{0}".format(colony), '_')
    model.addIndividualToGraph(colony_bnode, colony)

    model.addIndividualToGraph(
        self.resolve(phenotyping_center), phenotyping_center,
        self.globaltt['organization'])
    model.addTriple(
        study_bnode, self.globaltt['has_agent'], self.resolve(phenotyping_center))
    model.addIndividualToGraph(
        self.resolve(pipeline_stable_id), pipeline_name)
    model.addTriple(
        study_bnode, self.globaltt['part_of'], self.resolve(pipeline_stable_id))
    model.addIndividualToGraph(
        self.resolve(project_fullname), project_fullname, self.globaltt['project'])
    model.addTriple(
        study_bnode, self.globaltt['part_of'], self.resolve(project_fullname))
    return study_bnode
:param phenotyping_center: str, from self.files['all'] :param colony: str, from self.files['all'] :param project_fullname: str, from self.files['all'] :param pipeline_name: str, from self.files['all'] :param pipeline_stable_id: str, from self.files['all'] :param procedure_stable_id: str, from self.files['all'] :param procedure_name: str, from self.files['all'] :param parameter_stable_id: str, from self.files['all'] :param parameter_name: str, from self.files['all'] :param statistical_method: str, from self.files['all'] :param resource_name: str, from self.files['all'] :return: study bnode
562
def get_agents(self):
    # database/collection names and sort key assumed; literals lost in extraction
    collection = JSONClientValidated('authentication',
                                     collection='Agent',
                                     runtime=self._runtime)
    result = collection.find(self._view_filter()).sort('_id', DESCENDING)
    return objects.AgentList(result, runtime=self._runtime, proxy=self._proxy)
Gets all ``Agents``. In plenary mode, the returned list contains all known agents or an error results. Otherwise, the returned list may contain only those agents that are accessible through this session. return: (osid.authentication.AgentList) - a list of ``Agents`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
563
def astype(self, dtype):
    if dtype is None:
        raise ValueError('`dtype` cannot be `None`')

    dtype = np.dtype(dtype)
    if dtype == self.dtype:
        return self

    if is_numeric_dtype(self.dtype):
        if dtype == self.__real_dtype:
            if self.__real_space is None:
                self.__real_space = self._astype(dtype)
            return self.__real_space
        elif dtype == self.__complex_dtype:
            if self.__complex_space is None:
                self.__complex_space = self._astype(dtype)
            return self.__complex_space
        else:
            return self._astype(dtype)
    else:
        return self._astype(dtype)
Return a copy of this space with new ``dtype``. Parameters ---------- dtype : Scalar data type of the returned space. Can be provided in any way the `numpy.dtype` constructor understands, e.g. as built-in type or as a string. Data types with non-trivial shapes are not allowed. Returns ------- newspace : `TensorSpace` Version of this space with given data type.
564
def _set_src_vtep_ip(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # the IPv4 pattern regex, choice tuple, extension strings, and
        # namespace were lost in extraction; '...' placeholders mark
        # where they belonged
        t = YANGDynClass(
            v,
            base=RestrictedClassType(base_type=unicode,
                                     restriction_dict={'pattern': u'...'}),
            is_leaf=True,
            yang_name="src-vtep-ip",
            rest_name="src-vtep-ip-host",
            parent=self,
            choice=(u'...', u'...'),
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'...',
                                          u'alt-name': u'...',
                                          u'cli-drop-node-name': None,
                                          u'cli-incomplete-command': None}},
            namespace='...',
            defining_module='...',
            yang_type='inet:ipv4-address',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'src_vtep_ip must be of a type compatible with inet:ipv4-address',
            'defined-type': "inet:ipv4-address",
            'generated-type': '...',
        })

    self.__src_vtep_ip = t
    if hasattr(self, '_set'):
        self._set()
Setter method for src_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/src_vtep_ip (inet:ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_src_vtep_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_src_vtep_ip() directly.
565
def _authenticate(self):
    # parameter and response key names follow the Arlo login API; literals
    # were lost in extraction
    self.cleanup_headers()
    url = LOGIN_ENDPOINT
    data = self.query(
        url,
        method='POST',
        extra_params={
            'email': self.__username,
            'password': self.__password
        })

    if isinstance(data, dict) and data.get('success'):
        data = data.get('data')
        self.authenticated = data.get('authenticated')
        self.country_code = data.get('countryCode')
        self.date_created = data.get('dateCreated')
        self.__token = data.get('token')
        self.userid = data.get('userId')
        self.__headers['Authorization'] = self.__token
Authenticate user and generate token.
566
def check_subscriber_key_length(app_configs=None, **kwargs):
    from . import settings as djstripe_settings

    messages = []

    key = djstripe_settings.SUBSCRIBER_CUSTOMER_KEY
    key_size = len(str(key))
    if key and key_size > 40:
        messages.append(
            checks.Error(
                "DJSTRIPE_SUBSCRIBER_CUSTOMER_KEY must be no more than 40 characters long",
                hint="Current value: %r (%i characters)" % (key, key_size),
                id="djstripe.E001",
            )
        )

    return messages
Check that DJSTRIPE_SUBSCRIBER_CUSTOMER_KEY fits in metadata. Docs: https://stripe.com/docs/api#metadata
567
def pandasdfsummarytojson(df, ndigits=3):
    df = df.transpose()
    return {k: _pandassummarytojson(v, ndigits) for k, v in df.iterrows()}
Convert the result of a Pandas describe operation to JSON.

Parameters
----------
df : The result of a Pandas describe operation.
ndigits : int, optional - The number of significant digits to round to.

Returns
-------
A json object which captures the describe. Keys are field names and values are dictionaries with all of the indexes returned by the Pandas describe.
568
def all(cls, sort=None, limit=None):
    return cls.where(sort=sort, limit=limit)
Returns all objects of this type. Alias for where() (without filter arguments). See `where` for documentation on the `sort` and `limit` parameters.
569
def parse_config(data: dict) -> dict:
    # key names reconstructed from the MIP config layout; originals lost in extraction
    return {
        'email': data.get('email'),
        'family': data['family_id'],
        'samples': [{
            'id': sample_id,
            'type': analysis_type,
        } for sample_id, analysis_type in data['analysis_type'].items()],
        'config_path': data['config_file_analysis'],
        'is_dryrun': True if 'dry_run_all' in data else False,
        'log_path': data['log_file'],
        'out_dir': data['outdata_dir'],
        'priority': data['slurm_quality_of_service'],
        'sampleinfo_path': data['sample_info_file'],
    }
Parse MIP config file. Args: data (dict): raw YAML input from MIP analysis config file Returns: dict: parsed data
570
def get_tasks(self):
    tasks = self._get_tasks()
    tasks.extend(self._streams.get_tasks(self))
    return tasks
Get the tasks attached to the instance Returns ------- list List of tasks (:class:`asyncio.Task`)
571
def diffuser_conical(Di1, Di2, l=None, angle=None, fd=None, Re=None,
                     roughness=0.0, method='Rennels'):
    # method names recovered from the docstring; exception-message literals
    # were lost in extraction and are reconstructed
    beta = Di1/Di2
    beta2 = beta*beta
    if angle is not None:
        angle_rad = radians(angle)
        l = (Di2 - Di1)/(2.0*tan(0.5*angle_rad))
    elif l is not None:
        angle_rad = 2.0*atan(0.5*(Di2-Di1)/l)
        angle = degrees(angle_rad)
    else:
        raise Exception('Either `l` or `angle` must be specified')

    if method is None:
        method = 'Rennels'
    if method == 'Rennels':
        if fd is None:
            if Re is None:
                raise ValueError("The `Rennels` method requires either a "
                                 "specified friction factor or `Re`")
            fd = Colebrook(Re=Re, eD=roughness/Di2, tol=-1)
        if 0.0 < angle <= 20.0:
            K = 8.30*tan(0.5*angle_rad)**1.75*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 20.0 < angle <= 60.0 and 0.0 <= beta < 0.5:
            K = (1.366*sin(2.0*pi*(angle - 15.0)/180.)**0.5 - 0.170
                 - 3.28*(0.0625-beta**4)*(0.025*(angle-20.0))**0.5)*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 20.0 < angle <= 60.0 and beta >= 0.5:
            K = (1.366*sin(2.0*pi*(angle - 15.0)/180.0)**0.5 - 0.170)*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 60.0 < angle <= 180.0 and 0.0 <= beta < 0.5:
            beta4 = beta2*beta2
            K = (1.205 - 3.28*(0.0625 - beta4) - 12.8*beta4*beta2*((angle - 60.0)/120.)**0.5)*(1.0 - beta2)**2
        elif 60.0 < angle <= 180.0 and beta >= 0.5:
            K = (1.205 - 0.20*((angle - 60.0)/120.)**0.5)*(1.0 - beta**2)**2
        else:
            raise Exception('Angle is outside of the range 0 to 180 degrees')
        return K
    elif method == 'Crane':
        return diffuser_conical_Crane(Di1=Di1, Di2=Di2, l=l, angle=angle)
    elif method == 'Miller':
        A_ratio = 1.0/beta2
        if A_ratio > 4.0:
            A_ratio = 4.0
        elif A_ratio < 1.1:
            A_ratio = 1.1
        l_R1_ratio = l/(0.5*Di1)
        if l_R1_ratio < 0.1:
            l_R1_ratio = 0.1
        elif l_R1_ratio > 20.0:
            l_R1_ratio = 20.0
        Kd = max(float(bisplev(log(l_R1_ratio), log(A_ratio), tck_diffuser_conical_Miller)), 0)
        return Kd
    elif method == 'Idelchik':
        A_ratio = beta2
        if angle > 20.0:
            angle_fric = 20.0
        elif angle < 2.0:
            angle_fric = 2.0
        else:
            angle_fric = angle
        A_ratio_fric = A_ratio
        if A_ratio_fric < 0.05:
            A_ratio_fric = 0.05
        elif A_ratio_fric > 0.6:
            A_ratio_fric = 0.6
        K_fr = float(contraction_conical_frction_Idelchik_obj(angle_fric, A_ratio_fric))
        K_exp = float(diffuser_conical_Idelchik_obj(min(0.6, A_ratio), max(3.0, angle)))
        return K_fr + K_exp
    elif method == 'Swamee':
        r = Di2/Di1
        K = (0.25*angle_rad**-3*(1.0 + 0.6*r**(-1.67)*(pi-angle_rad)/angle_rad)**(0.533*r - 2.6))**-0.5
        return K
    else:
        raise ValueError('Specified method not recognized; methods are %s'
                         % (diffuser_conical_methods))
r'''Returns the loss coefficient for any conical pipe diffuser. This calculation has four methods available. The 'Rennels' [1]_ formulas are as follows (three different formulas are used, depending on the angle and the ratio of diameters): For 0 to 20 degrees, all aspect ratios: .. math:: K_1 = 8.30[\tan(\alpha/2)]^{1.75}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 20 to 60 degrees, beta < 0.5: .. math:: K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5} - 0.170 - 3.28(0.0625-\beta^4)\sqrt{\frac{\alpha-20^\circ}{40^\circ}}\right\} (1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 20 to 60 degrees, beta >= 0.5: .. math:: K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5} - 0.170 \right\}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 60 to 180 degrees, beta < 0.5: .. math:: K_1 = \left[1.205 - 3.28(0.0625-\beta^4)-12.8\beta^6\sqrt{\frac {\alpha-60^\circ}{120^\circ}}\right](1-\beta^2)^2 For 60 to 180 degrees, beta >= 0.5: .. math:: K_1 = \left[1.205 - 0.20\sqrt{\frac{\alpha-60^\circ}{120^\circ}} \right](1-\beta^2)^2 The Swamee [5]_ formula is: .. math:: K = \left\{\frac{0.25}{\theta^3}\left[1 + \frac{0.6}{r^{1.67}} \left(\frac{\pi-\theta}{\theta} \right) \right]^{0.533r - 2.6} \right\}^{-0.5} .. figure:: fittings/diffuser_conical.png :scale: 60 % :alt: diffuser conical; after [1]_ Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] l : float, optional Length of the contraction along the pipe axis, optional, [m] angle : float, optional Angle of contraction, [degrees] fd : float, optional Darcy friction factor [-] Re : float, optional Reynolds number of the pipe (used in Rennels method only if no friction factor given), [m] roughness : float, optional Roughness of bend wall (used in Rennel method if no friction factor given), [m] method : str The method to use for the calculation; one of 'Rennels', 'Crane', 'Miller', 'Swamee', or 'Idelchik' [-] Returns ------- K : float Loss coefficient with respect to smaller, upstream diameter [-] Notes ----- The Miller method changes around quite a bit. There is quite a bit of variance in the predictions of the methods, as demonstrated by the following figure. .. plot:: plots/diffuser_conical.py Examples -------- >>> diffuser_conical(Di1=1/3., Di2=1.0, angle=50.0, Re=1E6) 0.8027721093415322 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya Treniya). National technical information Service, 1966. .. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. .. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply Pipe Networks. John Wiley & Sons, 2008. .. [5] Miller, Donald S. Internal Flow Systems: Design and Performance Prediction. Gulf Publishing Company, 1990.
572
def print_err(*args, **kwargs):
    if kwargs.get('file', None) is None:
        kwargs['file'] = sys.stderr
    color = dict_pop_or(kwargs, 'color', True)
    # Reconstructed literals: 'red' for error text and ' ' as the join
    # separator are assumptions; the original strings were stripped.
    if color and kwargs['file'].isatty():
        msg = kwargs.get('sep', ' ').join(
            str(a) if isinstance(a, C) else str(C(a, 'red'))
            for a in args
        )
    else:
        msg = kwargs.get('sep', ' ').join(
            str(a.stripped() if isinstance(a, C) else a)
            for a in args
        )
    newline = dict_pop_or(kwargs, 'newline', False)
    if newline:
        msg = '\n{}'.format(msg)
    print(msg, **kwargs)
A wrapper for print() that uses stderr by default.
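Hypothetical usage, assuming a colr-style color type C and a dict_pop_or helper (dict.pop with a default) are in scope as above:

print_err('error:', 'connection refused')    # red text when stderr is a tty
print_err('plain diagnostics', color=False)  # never colorized
print_err('spaced out', newline=True)        # prefixed with a blank line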
573
def get_as(self, cls: Type[MaybeBytesT]) -> Sequence[MaybeBytesT]: _ = cls return cast(Sequence[MaybeBytesT], self.items)
Return the list of parsed objects.
574
def optionally_with_args(phase, **kwargs): if isinstance(phase, PhaseGroup): return phase.with_args(**kwargs) if isinstance(phase, collections.Iterable): return [optionally_with_args(p, **kwargs) for p in phase] if not isinstance(phase, phase_descriptor.PhaseDescriptor): phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase) return phase.with_known_args(**kwargs)
Apply only the args that the phase knows. If the phase has a **kwargs-style argument, it counts as knowing all args. Args: phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or iterable of those, the phase or phase group (or iterable) to apply with_args to. **kwargs: arguments to apply to the phase. Returns: phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated args.
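A hedged usage sketch; measure_voltage is a hypothetical phase function, and openhtf must be installed so PhaseDescriptor.wrap_or_copy can wrap a bare callable:

def measure_voltage(test, channel, retries=3):
    pass  # hypothetical OpenHTF phase body

# Bind channel= ahead of execution; args a phase does not accept are
# dropped unless the phase declares **kwargs.
phase = optionally_with_args(measure_voltage, channel=2)
phases = optionally_with_args([measure_voltage, measure_voltage], channel=1)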
575
def fix(csvfile):
    # Message strings below are reconstructions; the originals were stripped.
    header('Apply fixes from {0}', csvfile.name)
    bads = []
    reader = csv.reader(csvfile)
    next(reader)  # skip the header row
    for id, _, sources, dests in reader:
        advice = Advice.objects.get(id=id)
        sources = [s.strip() for s in sources.split(',') if s.strip()]
        dests = [d.strip() for d in dests.split(',') if d.strip()]
        if not len(sources) == len(dests):
            bads.append(id)
            continue
        for source, dest in zip(sources, dests):
            echo('{0}: replacing {1} with {2}', white(id), white(source), white(dest))
            advice.subject = advice.subject.replace(source, dest)
            advice.content = advice.content.replace(source, dest)
            advice.save()
            index(advice)
    for id in bads:
        echo('Skipped {0}: sources and destinations do not match', white(id))
    success()
Apply a fix (i.e. remove plain names)
576
def outgoing_caller_ids(self):
    if self._outgoing_caller_ids is None:
        self._outgoing_caller_ids = OutgoingCallerIdList(
            self._version,
            account_sid=self._solution['account_sid'],
        )
    return self._outgoing_caller_ids
Access the outgoing_caller_ids :returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
577
def get_autoscaling_group_properties(asg_client, env, service): try: response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=["{}-{}".format(env, service)]) if len(response["AutoScalingGroups"]) == 0: response = asg_client.describe_tags(Filters=[{ "Name": "Key", "Values": ["Name"] }, { "Name": "Value", "Values": ["{}-{}".format(env, service)]}]) if len(response["Tags"]) == 0: return None else: asg_name = response["Tags"][0]["ResourceId"] response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name]) return response["AutoScalingGroups"] else: return response["AutoScalingGroups"] except ClientError as error: raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error)
Gets the autoscaling group properties based on the service name that is provided. This function
will attempt to find the autoscaling group based on the following logic:
1. If the service name provided matches the autoscaling group name
2. If the service name provided matches the Name tag of the autoscaling group
3. If the service name provided does not match either of the above, return None
Args:
    asg_client: Instantiated boto3 autoscaling client
    env: Name of the environment to search for the autoscaling group
    service: Name of the service
Returns:
    JSON object of the autoscaling group properties if it exists
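A hedged usage sketch; the region, environment, and service names are illustrative, and real AWS credentials plus a matching autoscaling group are required:

import boto3

asg_client = boto3.client('autoscaling', region_name='us-east-1')
groups = get_autoscaling_group_properties(asg_client, 'prod', 'web')
if groups:
    asg = groups[0]
    print(asg['AutoScalingGroupName'], asg['DesiredCapacity'])
else:
    print('No matching autoscaling group found')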
578
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
    most_frequent_list = self._get_most_frequent_values(rows, column)
    if not most_frequent_list:
        return 0.0
    most_frequent_value = most_frequent_list[0]
    if not isinstance(most_frequent_value, Number):
        raise ExecutionError(f"Invalid value for mode_number: {most_frequent_value}")
    return most_frequent_value
Takes a list of rows and a column and returns the most frequent value under that column in those rows.
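The mode selection itself reduces to counting values and breaking ties by first occurrence, which can be sketched without the table abstractions:

from collections import Counter

values = [2.0, 3.0, 3.0, 7.0]
# most_common(1) returns [(value, count)]; ties resolve to the value seen
# first, since Counter preserves insertion order on Python 3.7+.
mode_value, count = Counter(values).most_common(1)[0]
print(mode_value)  # 3.0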
579
def _conflicted_data_points(L): m = sparse.diags(np.ravel(L.max(axis=1).todense())) return np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense())
Returns an indicator vector where ith element = 1 if x_i is labeled by at least two LFs that give it disagreeing labels.
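A tiny worked example: row 0 receives disagreeing labels {1, 2} from two LFs, row 1 receives agreeing labels {1, 1}, and row 2 is labeled by only one LF, so only row 0 is flagged:

import numpy as np
from scipy import sparse

L = sparse.csr_matrix(np.array([[1, 2],
                                [1, 1],
                                [0, 2]]))
print(_conflicted_data_points(L))  # [1 0 0]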
580
def check_schema_coverage(doc, schema):
    error_list = []
    to_delete = []
    for entry in doc.list_tuples():
        (name, value, index, seq) = entry
        temp_schema = schema_match_up(doc, schema)
        if not name in temp_schema.list_values("name"):
            error_list.append(
                ("[error]", "doc", seq,
                 "a name of '{}' not found in schema".format(name))
            )
            to_delete.append(seq)
        else:
            el = check_schema_coverage(doc[name, value, index], temp_schema["name", name])
            error_list.extend(el)
    for seq in to_delete:
        doc.seq_delete(seq)
    return error_list
FORWARD CHECK OF DOCUMENT This routine looks at each element in the doc, and makes sure there is a matching 'name' in the schema at that level.
581
def _write_passphrase(stream, passphrase, encoding):
    passphrase = '%s\n' % passphrase
    passphrase = passphrase.encode(encoding)
    stream.write(passphrase)
    log.debug("Wrote passphrase on stdin.")
Write the passphrase from memory to the GnuPG process' stdin. :type stream: file, :class:`~io.BytesIO`, or :class:`~io.StringIO` :param stream: The input file descriptor to write the password to. :param str passphrase: The passphrase for the secret key material. :param str encoding: The data encoding expected by GnuPG. Usually, this is ``sys.getfilesystemencoding()``.
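With the '%s\n' formatting reconstructed above, the behavior can be checked against an in-memory stream instead of a live GnuPG process:

import io

stream = io.BytesIO()
_write_passphrase(stream, 'hunter2', 'utf-8')
assert stream.getvalue() == b'hunter2\n'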
582
def autoprefixer(input, **kw):
    # The config keys and the command template are reconstructions; the
    # original string literals were stripped from the source.
    cmd = '%s %s %s' % (current_app.config.get('AUTOPREFIXER_BIN'),
                        current_app.config.get('AUTOPREFIXER_ARGS'),
                        input)
    subprocess.call(cmd, shell=True)
Run autoprefixer
583
def _parse_nicknameinuse(client, command, actor, args): nick, _, _ = args.rpartition(" ") client.dispatch_event("NICKNAMEINUSE", nick)
Parse a NICKNAMEINUSE message and dispatch an event. The parameter passed along with the event is the nickname which is already in use.
584
def assign_objective_requisite(self, objective_id, requisite_objective_id):
    # The string literals below (type key, provider name, display name and
    # description) are reconstructions; the originals were stripped.
    requisite_type = Type(**Relationship().get_type_data('REQUISITE'))
    ras = self._get_provider_manager(
        'RELATIONSHIP').get_relationship_admin_session_for_family(
        self.get_objective_bank_id(), proxy=self._proxy)
    rfc = ras.get_relationship_form_for_create(objective_id, requisite_objective_id, [])
    rfc.set_display_name('Objective Requisite')
    rfc.set_description('An Objective Requisite relationship')
    rfc.set_genus_type(requisite_type)
    ras.create_relationship(rfc)
Creates a requirement dependency between two ``Objectives``. arg: objective_id (osid.id.Id): the ``Id`` of the dependent ``Objective`` arg: requisite_objective_id (osid.id.Id): the ``Id`` of the required ``Objective`` raise: AlreadyExists - ``objective_id`` already mapped to ``requisite_objective_id`` raise: NotFound - ``objective_id`` or ``requisite_objective_id`` not found raise: NullArgument - ``objective_id`` or ``requisite_objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
585
def write_byte(self, address, value): LOGGER.debug("Writing byte %s to device %s!", bin(value), hex(address)) return self.driver.write_byte(address, value)
Writes the byte to unaddressed register in a device.
586
def restore_state(self, state):
    super(ReferenceController, self).restore_state(state)

    # The key names below were stripped from the source; these are the
    # obvious counterparts of the values being restored.
    state_name = state.get('state_name')
    state_version = state.get('state_version')
    if state_name != self.STATE_NAME or state_version != self.STATE_VERSION:
        raise ArgumentError("Invalid emulated device state name or version",
                            found=(state_name, state_version),
                            expected=(self.STATE_NAME, self.STATE_VERSION))
    self.app_info = state.get('app_info', (0, "0.0"))
    self.os_info = state.get('os_info', (0, "0.0"))
    self.sensor_log.prepare_for_restore()
    self.remote_bridge.restore(state.get('remote_bridge', {}))
    self.tile_manager.restore(state.get('tile_manager', {}))
    self.config_database.restore(state.get('config_database', {}))
    self.sensor_log.restore(state.get('sensor_log', {}))
Restore the current state of this emulated object. Args: state (dict): A previously dumped state produced by dump_state.
587
def parse(self, data):
    # Exception messages are reconstructions; the original strings were stripped.
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF ICB Tag already initialized')

    (self.prior_num_direct_entries, self.strategy_type, self.strategy_param,
     self.max_num_entries, reserved, self.file_type,
     self.parent_icb_log_block_num, self.parent_icb_part_ref_num,
     self.flags) = struct.unpack_from(self.FMT, data, 0)

    if self.strategy_type not in (4, 4096):
        raise pycdlibexception.PyCdlibInvalidISO('UDF ICB Tag invalid strategy type')

    if reserved != 0:
        raise pycdlibexception.PyCdlibInvalidISO('UDF ICB Tag reserved field not 0')

    self._initialized = True
Parse the passed in data into a UDF ICB Tag. Parameters: data - The data to parse. Returns: Nothing.
588
def handle_cf_error(error_pointer):
    if is_null(error_pointer):
        return

    error = unwrap(error_pointer)
    if is_null(error):
        return

    cf_string_domain = CoreFoundation.CFErrorGetDomain(error)
    domain = CFHelpers.cf_string_to_unicode(cf_string_domain)
    CoreFoundation.CFRelease(cf_string_domain)
    num = CoreFoundation.CFErrorGetCode(error)

    cf_string_ref = CoreFoundation.CFErrorCopyDescription(error)
    output = CFHelpers.cf_string_to_unicode(cf_string_ref)
    CoreFoundation.CFRelease(cf_string_ref)

    if output is None:
        if domain == 'NSOSStatusErrorDomain':
            # The original contained a large literal dict mapping specific
            # Security framework OSStatus codes (e.g. -2147416010) to
            # human-readable error-constant names. The string values were
            # lost when this source was extracted, so the mapping cannot be
            # faithfully reproduced; only the generic fallback below is kept.
            code_map = {}
            if num in code_map:
                output = code_map[num]

        if not output:
            output = '%s %s' % (domain, num)

    raise OSError(output)
Checks a CFErrorRef and throws an exception if there is an error to report :param error_pointer: A CFErrorRef :raises: OSError - when the CFErrorRef contains an error
589
def list_models(self, limit=-1, offset=-1): return self.registry.list_models(limit=limit, offset=offset)
Get a list of models in the registry. Parameters ---------- limit : int Limit number of items in the result set offset : int Set offset in list (order as defined by object store) Returns ------- list(ModelHandle)
590
def disconnect(self, signal=None, slot=None, transform=None, condition=None):
    if slot:
        self.connections[signal][condition].pop(slot, None)
    elif condition is not None:
        self.connections[signal].pop(condition, None)
    elif signal:
        self.connections.pop(signal, None)
    else:
        delattr(self, 'connections')
Removes connection(s) between this object's signal and connected slot(s)

signal: the signal this class will emit, to cause the slot method to be called
slot: the slot method or function to call
transform: an optional value override to pass into the slot method as the first variable
condition: only call the slot method if the value emitted matches this condition
591
def attach_bytes(key, the_bytes): tf_v1.add_to_collection( _ATTACHMENT_COLLECTION_INTERNAL, module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))
Adds a ModuleAttachment to the current graph. Args: key: A string with the unique key of the attachment. the_bytes: A bytes object with the serialized attachment.
592
def setCustomColorRamp(self, colors=[], interpolatedPoints=10): self._colorRamp = ColorRampGenerator.generateCustomColorRamp(colors, interpolatedPoints)
Accepts a list of RGB tuples and interpolates between them to create a custom color ramp. Returns the color ramp as a list of RGB tuples.
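The underlying operation is linear interpolation between successive RGB anchors; a minimal standalone sketch of that idea (not the library's exact algorithm):

def lerp_ramp(c1, c2, n):
    # n interpolated colors between (and including) the two anchors
    return [tuple(int(a + (b - a) * i / (n + 1)) for a, b in zip(c1, c2))
            for i in range(n + 2)]

print(lerp_ramp((255, 0, 0), (0, 0, 255), 3))
# [(255, 0, 0), (191, 0, 63), (127, 0, 127), (63, 0, 191), (0, 0, 255)]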
593
def from_utf8(buf, errors='strict'):
    if isinstance(buf, unicode):
        return buf
    else:
        return unicode(buf, 'utf8', errors)
Decodes a UTF-8 compatible byte string into a unicode object.

`buf`
   string or unicode string to convert.

Returns a `unicode` string.

* Raises a ``UnicodeDecodeError`` exception if decoding failed and
  `errors` isn't set to 'replace'.
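Python 2 semantics in brief: bytes are decoded, unicode passes through unchanged, and errors='replace' substitutes U+FFFD for invalid sequences:

assert from_utf8(b'caf\xc3\xa9') == u'caf\xe9'
assert from_utf8(u'caf\xe9') == u'caf\xe9'
assert from_utf8(b'caf\xe9', errors='replace') == u'caf\ufffd'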
594
def pause(self, instance_id, keep_provisioned=True):
    try:
        if self._paused:
            log.debug("node %s is already paused", instance_id)
            return

        self._paused = True
        # Azure Service Management post-shutdown actions: 'Stopped' keeps
        # the VM provisioned, 'StoppedDeallocated' releases its resources.
        post_shutdown_action = 'Stopped' if keep_provisioned else \
                               'StoppedDeallocated'
        result = self._subscription._sms.shutdown_role(
            service_name=self._cloud_service._name,
            deployment_name=self._cloud_service._name,
            role_name=self._qualified_name,
            post_shutdown_action=post_shutdown_action)
        self._subscription._wait_result(result)
    except Exception as exc:
        log.error("error pausing instance %s: %s", instance_id, exc)
        raise
    log.debug('paused instance %s', instance_id)
shuts down the instance without destroying it. The AbstractCloudProvider class uses 'stop' to refer to destroying a VM, so use 'pause' to mean powering it down while leaving it allocated. :param str instance_id: instance identifier :return: None
595
def xmlrpc_provision(self, app_id, path_to_cert_or_cert, environment, timeout=15):
    if environment not in ('sandbox', 'production'):
        # Fault message text is a reconstruction
        raise xmlrpc.Fault(401, 'Invalid environment `%s`. Valid environments '
                                'are `sandbox` and `production`.' % (environment,))
    if not app_id in self.app_ids:
        self.app_ids[app_id] = APNSService(path_to_cert_or_cert, environment, timeout)
Starts an APNSService for the this app_id and keeps it running Arguments: app_id the app_id to provision for APNS path_to_cert_or_cert absolute path to the APNS SSL cert or a string containing the .pem file environment either 'sandbox' or 'production' timeout seconds to timeout connection attempts to the APNS server Returns: None
596
def read_json(self, params=None): response = self.read_raw(params=params) response.raise_for_status() return response.json()
Get information about the current entity. Call :meth:`read_raw`. Check the response status code, decode JSON and return the decoded JSON as a dict. :return: A dict. The server's response, with all JSON decoded. :raises: ``requests.exceptions.HTTPError`` if the response has an HTTP 4XX or 5XX status code. :raises: ``ValueError`` If the response JSON can not be decoded.
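The same fetch/validate/decode pattern expressed with requests directly, for reference (the URL is illustrative):

import requests

response = requests.get('https://example.com/api/items/1')
response.raise_for_status()  # raises HTTPError on 4XX/5XX
data = response.json()       # raises ValueError if the body is not JSON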
597
def asyncPipeRegex(context=None, _INPUT=None, conf=None, **kwargs):
    # 'RULE' follows the docstring's conf layout; the dispatch key 'pass'
    # is an assumption, as the original literal was stripped.
    splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
    asyncConvert = partial(maybeDeferred, convert_func)
    asyncFuncs = get_async_dispatch_funcs('pass', asyncConvert)
    parsed = yield asyncDispatch(splits, *asyncFuncs)
    _OUTPUT = yield maybeDeferred(parse_results, parsed)
    returnValue(iter(_OUTPUT))
An operator that asynchronously replaces text in items using regexes. Each
rule has the general format: "In [field] replace [match] with [replace]".
Not loopable.

Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
    'RULE': [
        {
            'field': {'value': <'search field'>},
            'match': {'value': <'regex'>},
            'replace': {'value': <'replacement'>},
            'globalmatch': {'value': '1'},
            'singlelinematch': {'value': '2'},
            'multilinematch': {'value': '4'},
            'casematch': {'value': '8'}
        }
    ]
}

Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
598
def handle_sketch_version(msg): if not msg.gateway.is_sensor(msg.node_id): return None msg.gateway.sensors[msg.node_id].sketch_version = msg.payload msg.gateway.alert(msg) return None
Process an internal sketch version message.
599
def read_config(config_fname=None):
    if not config_fname:
        config_fname = DEFAULT_CONFIG_FNAME

    try:
        with open(config_fname, 'r') as config_file:
            config = yaml.load(config_file)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            # Warning text is a reconstruction; the original was stripped
            print('Warning: config file {0} not found!'
                  .format(config_fname))
            config = {}
        else:
            raise

    collate_config = config.pop('collate', {})

    # Promote a bare boolean into the dict form
    if type(collate_config) is bool:
        collate_config = {'enable': collate_config}

    # Fold deprecated flat `collate_*` keys into the collate dict
    collatestr = 'collate_'
    foundkeys = []
    for key in list(config.keys()):
        if key.startswith(collatestr):
            foundkeys.append(key)
            collate_config[key[len(collatestr):]] = config.pop(key)
    if foundkeys:
        print("Use of these keys is deprecated: {}.".format(
            ", ".join(foundkeys)))
        print("Instead use collate dictionary and subkey "
              "without prefix")

    config['collate'] = collate_config

    return config
Parse input configuration file and return a config dict.
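Assuming the reconstructed 'collate'/'collate_' key names above, a config file that still uses the deprecated flat keys is migrated like this:

# config.yaml (illustrative):
#   model: mom
#   collate_enable: true
config = read_config('config.yaml')
print(config['collate'])  # {'enable': True}, after a deprecation warning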