Dataset columns: code (string, 75 to 104k characters) | docstring (string, 1 to 46.9k characters)
def get_python_shell():
    """Determine python shell

    get_python_shell() returns

    'shell' (started python on command line using "python")
    'ipython' (started ipython on command line using "ipython")
    'ipython-notebook' (e.g., running in Spyder or started with "ipython qtconsole")
    'jupyter-notebook' (running in a Jupyter notebook)

    See also https://stackoverflow.com/a/37661854
    """
    env = os.environ
    shell = "shell"
    program = os.path.basename(env["_"])

    if "jupyter-notebook" in program:
        shell = "jupyter-notebook"
    elif "JPY_PARENT_PID" in env or "ipython" in program:
        shell = "ipython"
        if "JPY_PARENT_PID" in env:
            shell = "ipython-notebook"

    return shell
Determine python shell get_python_shell() returns 'shell' (started python on command line using "python") 'ipython' (started ipython on command line using "ipython") 'ipython-notebook' (e.g., running in Spyder or started with "ipython qtconsole") 'jupyter-notebook' (running in a Jupyter notebook) See also https://stackoverflow.com/a/37661854
def snakescan(xi, yi, xf, yf):
    """Scan pixels in a snake pattern along the x-coordinate then y-coordinate

    :param xi: Initial x-coordinate
    :type xi: int
    :param yi: Initial y-coordinate
    :type yi: int
    :param xf: Final x-coordinate
    :type xf: int
    :param yf: Final y-coordinate
    :type yf: int

    :returns: Coordinate generator
    :rtype: function
    """
    # Determine direction to move
    dx = 1 if xf >= xi else -1
    dy = 1 if yf >= yi else -1

    # Scan pixels first along x-coordinate then y-coordinate and flip
    # x-direction when the end of the line is reached
    x, xa, xb = xi, xi, xf
    for y in range(yi, yf + dy, dy):
        for x in range(xa, xb + dx, dx):
            yield x, y

        # Swap x-direction
        if x == xa or x == xb:
            dx *= -1
            xa, xb = xb, xa
Scan pixels in a snake pattern along the x-coordinate then y-coordinate :param xi: Initial x-coordinate :type xi: int :param yi: Initial y-coordinate :type yi: int :param xf: Final x-coordinate :type xf: int :param yf: Final y-coordinate :type yf: int :returns: Coordinate generator :rtype: function
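A minimal usage sketch of the generator above; the grid bounds are illustrative.

# Iterate a 3x2 grid starting at (0, 0); each row alternates scan direction.
coords = list(snakescan(0, 0, 2, 1))
print(coords)
# [(0, 0), (1, 0), (2, 0), (2, 1), (1, 1), (0, 1)]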
def _rectify_countdown_or_bool(count_or_bool):
    """
    used by recursive functions to specify which level to turn a bool on in
    counting down yields True, True, ..., False
    counting up yields False, False, False, ... True

    Args:
        count_or_bool (bool or int): if positive and an integer, it will count
            down, otherwise it will remain the same.

    Returns:
        int or bool: count_or_bool_

    CommandLine:
        python -m utool.util_str --test-_rectify_countdown_or_bool

    Example:
        >>> from ubelt.util_format import _rectify_countdown_or_bool  # NOQA
        >>> count_or_bool = True
        >>> a1 = (_rectify_countdown_or_bool(2))
        >>> a2 = (_rectify_countdown_or_bool(1))
        >>> a3 = (_rectify_countdown_or_bool(0))
        >>> a4 = (_rectify_countdown_or_bool(-1))
        >>> a5 = (_rectify_countdown_or_bool(-2))
        >>> a6 = (_rectify_countdown_or_bool(True))
        >>> a7 = (_rectify_countdown_or_bool(False))
        >>> a8 = (_rectify_countdown_or_bool(None))
        >>> result = [a1, a2, a3, a4, a5, a6, a7, a8]
        >>> print(result)
        [1, 0, 0, -1, -2, True, False, False]
    """
    if count_or_bool is True or count_or_bool is False:
        count_or_bool_ = count_or_bool
    elif isinstance(count_or_bool, int):
        if count_or_bool == 0:
            return 0
        elif count_or_bool > 0:
            count_or_bool_ = count_or_bool - 1
        else:
            # We don't count up negatives anymore
            count_or_bool_ = count_or_bool
    else:
        count_or_bool_ = False
    return count_or_bool_
used by recursive functions to specify which level to turn a bool on in counting down yields True, True, ..., False counting up yields False, False, False, ... True Args: count_or_bool (bool or int): if positive and an integer, it will count down, otherwise it will remain the same. Returns: int or bool: count_or_bool_ CommandLine: python -m utool.util_str --test-_rectify_countdown_or_bool Example: >>> from ubelt.util_format import _rectify_countdown_or_bool # NOQA >>> count_or_bool = True >>> a1 = (_rectify_countdown_or_bool(2)) >>> a2 = (_rectify_countdown_or_bool(1)) >>> a3 = (_rectify_countdown_or_bool(0)) >>> a4 = (_rectify_countdown_or_bool(-1)) >>> a5 = (_rectify_countdown_or_bool(-2)) >>> a6 = (_rectify_countdown_or_bool(True)) >>> a7 = (_rectify_countdown_or_bool(False)) >>> a8 = (_rectify_countdown_or_bool(None)) >>> result = [a1, a2, a3, a4, a5, a6, a7, a8] >>> print(result) [1, 0, 0, -1, -2, True, False, False]
def _verify(self, valid_subscriptions, fix):
    """Check if `self` is valid roster item.

    Valid item must have proper `subscription` and valid value for 'ask'.

    :Parameters:
        - `valid_subscriptions`: sequence of valid subscription values
        - `fix`: if `True` then replace invalid 'subscription' and 'ask'
          values with the defaults
    :Types:
        - `fix`: `bool`

    :Raise: `ValueError` if the item is invalid.
    """
    if self.subscription not in valid_subscriptions:
        if fix:
            logger.debug("RosterItem.from_xml: got unknown 'subscription':"
                         " {0!r}, changing to None".format(self.subscription))
            self.subscription = None
        else:
            raise ValueError("Bad 'subscription'")
    if self.ask not in (None, u"subscribe"):
        if fix:
            logger.debug("RosterItem.from_xml: got unknown 'ask':"
                         " {0!r}, changing to None".format(self.ask))
            self.ask = None
        else:
            raise ValueError("Bad 'ask'")
Check if `self` is valid roster item. Valid item must have proper `subscription` and valid value for 'ask'. :Parameters: - `valid_subscriptions`: sequence of valid subscription values - `fix`: if `True` then replace invalid 'subscription' and 'ask' values with the defaults :Types: - `fix`: `bool` :Raise: `ValueError` if the item is invalid.
def open(self):
    """ Open the HID device for reading and writing. """
    if self._is_open:
        raise HIDException("Failed to open device: HIDDevice already open")

    path = self.path.encode('utf-8')
    dev = hidapi.hid_open_path(path)
    if dev:
        self._is_open = True
        self._device = dev
    else:
        raise HIDException("Failed to open device")
Open the HID device for reading and writing.
def _build_str_from_chinese(chinese_items):
    """Return the standard-format datetime string corresponding to the parsed
    Chinese date keywords."""
    year, month, day = chinese_items
    year = reduce(lambda a, b: a * 10 + b, map(CHINESE_NUMS.find, year))
    return '%04d-%02d-%02d 00:00:00' % (year, _parse_chinese_field(month),
                                        _parse_chinese_field(day))
Return the standard-format datetime string corresponding to the parsed Chinese date keywords.
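A hedged sketch of just the year conversion, assuming CHINESE_NUMS is a digit-ordered string such as '零一二三四五六七八九'; the _parse_chinese_field helper used for month and day is not shown in this row.

from functools import reduce

# Assumed mapping: the index of a character in the string is its digit value.
CHINESE_NUMS = '零一二三四五六七八九'

def _year_from_chinese(year_chars):
    # '二零一九' -> 2, 0, 1, 9 -> 2019
    return reduce(lambda a, b: a * 10 + b, map(CHINESE_NUMS.find, year_chars))

print(_year_from_chinese('二零一九'))  # 2019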
def delete_workspace_config(namespace, workspace, cnamespace, config):
    """Delete method configuration in workspace.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        cnamespace (str): Method configuration namespace
        config (str): Method configuration name

    Swagger:
        https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
    """
    uri = "workspaces/{0}/{1}/method_configs/{2}/{3}".format(namespace,
                                                             workspace,
                                                             cnamespace,
                                                             config)
    return __delete(uri)
Delete method configuration in workspace. Args: namespace (str): project to which workspace belongs workspace (str): Workspace name cnamespace (str): Method configuration namespace config (str): Method configuration name Swagger: https://api.firecloud.org/#!/Method_Configurations/deleteWorkspaceMethodConfig
def print_row(self, **kwargs):
    '''
    keys of kwargs must be the names passed to __init__(...) as `column_names`
    '''
    meta_string = '|'
    for key in self.column_names:
        float_specifier = ''
        if isinstance(kwargs[key], float):
            float_specifier = '.3f'
        meta_string += " {%s:<{width}%s}|" % (key, float_specifier)
    kwargs['width'] = self.column_width - 1
    print(meta_string.format(**kwargs))
    print(self.hr)
keys of kwargs must be the names passed to __init__(...) as `column_names`
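A minimal usage sketch assuming a hypothetical object carrying the column_names, column_width, and hr attributes that print_row reads; the function above is called unbound for illustration.

from types import SimpleNamespace

# Hypothetical stand-in for the table object print_row is normally bound to.
table = SimpleNamespace(column_names=['epoch', 'loss'], column_width=12, hr='-' * 26)
print_row(table, epoch=1, loss=0.1234)
# | 1          | 0.123      |
# --------------------------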
def spcol(x, knots, spline_order):
    """Computes the spline collocation matrix for knots in x.

    The spline collocation matrix contains all m-p-1 bases
    defined by knots. Specifically it contains the ith basis
    in the ith column.

    Input:
        x: vector to evaluate the bases on
        knots: vector of knots
        spline_order: order of the spline

    Output:
        colmat: m x m-p matrix
            The collocation matrix has size m x m-p where m
            denotes the number of points the basis is evaluated
            on and p is the spline order. The columns contain
            the ith basis of knots evaluated on x.
    """
    colmat = np.nan * np.ones((len(x), len(knots) - spline_order - 1))
    for i in range(0, len(knots) - spline_order - 1):
        colmat[:, i] = spline(x, knots, spline_order, i)
    return colmat
Computes the spline collocation matrix for knots in x. The spline collocation matrix contains all m-p-1 bases defined by knots. Specifically it contains the ith basis in the ith column. Input: x: vector to evaluate the bases on knots: vector of knots spline_order: order of the spline Output: colmat: m x m-p matrix The collocation matrix has size m x m-p where m denotes the number of points the basis is evaluated on and p is the spline order. The columns contain the ith basis of knots evaluated on x.
def _rebuild_all_command_chains(self):
    """
    Rebuilds execution chain for all registered commands.

    This method is typically called when interceptors are changed.
    Because of that it is more efficient to register interceptors
    before registering commands (typically it will be done in abstract classes).
    However, that performance penalty will be only once during creation time.
    """
    self._commands_by_name = {}
    for command in self._commands:
        self._build_command_chain(command)
Rebuilds execution chain for all registered commands. This method is typically called when interceptors are changed. Because of that it is more efficient to register interceptors before registering commands (typically it will be done in abstract classes). However, that performance penalty will be only once during creation time.
def to_html(ds: Any) -> str:
    """
    Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner.
    """
    rm = min(10, ds.shape[0])
    cm = min(10, ds.shape[1])
    html = "<p>"
    if ds.attrs.__contains__("title"):
        html += "<strong>" + ds.attrs["title"] + "</strong> "
    html += f"{ds.shape[0]} rows, {ds.shape[1]} columns, {len(ds.layers)} layer{'s' if len(ds.layers) > 1 else ''}<br/>(showing up to 10x10)<br/>"
    html += ds.filename + "<br/>"
    for (name, val) in ds.attrs.items():
        html += f"{name}: <em>{val}</em><br/>"
    html += "<table>"
    # Emit column attributes
    for ca in ds.col_attrs.keys():
        html += "<tr>"
        for ra in ds.row_attrs.keys():
            html += "<td>&nbsp;</td>"  # Space for row attrs
        html += "<td><strong>" + ca + "</strong></td>"  # Col attr name
        for v in ds.col_attrs[ca][:cm]:
            html += "<td>" + str(v) + "</td>"
        if ds.shape[1] > cm:
            html += "<td>...</td>"
        html += "</tr>"
    # Emit row attribute names
    html += "<tr>"
    for ra in ds.row_attrs.keys():
        html += "<td><strong>" + ra + "</strong></td>"  # Row attr name
    html += "<td>&nbsp;</td>"  # Space for col attrs
    for v in range(cm):
        html += "<td>&nbsp;</td>"
    if ds.shape[1] > cm:
        html += "<td>...</td>"
    html += "</tr>"
    # Emit row attr values and matrix values
    for row in range(rm):
        html += "<tr>"
        for ra in ds.row_attrs.keys():
            html += "<td>" + str(ds.row_attrs[ra][row]) + "</td>"
        html += "<td>&nbsp;</td>"  # Space for col attrs
        for v in ds[row, :cm]:
            html += "<td>" + str(v) + "</td>"
        if ds.shape[1] > cm:
            html += "<td>...</td>"
        html += "</tr>"
    # Emit ellipses
    if ds.shape[0] > rm:
        html += "<tr>"
        for v in range(rm + 1 + len(ds.row_attrs.keys())):
            html += "<td>...</td>"
        if ds.shape[1] > cm:
            html += "<td>...</td>"
        html += "</tr>"
    html += "</table>"
    return html
Return an HTML representation of the loom file or view, showing the upper-left 10x10 corner.
def _clopper_pearson_confidence_interval(samples, error_rate): """Computes a confidence interval for the mean of the given 1-D distribution. Assumes (and checks) that the given distribution is Bernoulli, i.e., takes only two values. This licenses using the CDF of the binomial distribution for the confidence, which is tighter (for extreme probabilities) than the DKWM inequality. The method is known as the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Assumes: - The given samples were drawn iid from the distribution of interest. - The given distribution is a Bernoulli, i.e., supported only on low and high. Guarantees: - The probability (over the randomness of drawing the given sample) that the true mean is outside the returned interval is no more than the given error_rate. Args: samples: `np.ndarray` of samples drawn iid from the distribution of interest. error_rate: Python `float` admissible rate of mistakes. Returns: low: Lower bound of confidence interval. high: Upper bound of confidence interval. Raises: ValueError: If `samples` has rank other than 1 (batch semantics are not implemented), or if `samples` contains values other than `low` or `high` (as that makes the distribution not Bernoulli). """ # TODO(b/78025336) Migrate this confidence interval function # to statistical_testing.py. In order to do that # - Get the binomial CDF from the Binomial distribution # - Implement scalar root finding in TF. Batch bisection search # shouldn't be too hard, and is definitely good enough for this # problem. Batching the Brent algorithm (from scipy) that is used # here may be more involved, but may also not be necessary---it's # only used here because scipy made it convenient. In particular, # robustness is more important than speed here, which may make # bisection search actively better. # - The rest is just a matter of rewriting in the appropriate style. if optimize is None or stats is None: raise ValueError( "Scipy is required for computing Clopper-Pearson confidence intervals") if len(samples.shape) != 1: raise ValueError("Batch semantics not implemented") n = len(samples) low = np.amin(samples) high = np.amax(samples) successes = np.count_nonzero(samples - low) failures = np.count_nonzero(samples - high) if successes + failures != n: uniques = np.unique(samples) msg = ("Purportedly Bernoulli distribution had distinct samples" " {}, {}, and {}".format(uniques[0], uniques[1], uniques[2])) raise ValueError(msg) def p_small_enough(p): prob = stats.binom.logcdf(successes, n, p) return prob - np.log(error_rate / 2.) def p_big_enough(p): prob = stats.binom.logsf(successes, n, p) return prob - np.log(error_rate / 2.) high_p = optimize.brentq( p_small_enough, float(successes) / n, 1., rtol=1e-9) low_p = optimize.brentq( p_big_enough, 0., float(successes) / n, rtol=1e-9) low_interval = low + (high - low) * low_p high_interval = low + (high - low) * high_p return (low_interval, high_interval)
Computes a confidence interval for the mean of the given 1-D distribution. Assumes (and checks) that the given distribution is Bernoulli, i.e., takes only two values. This licenses using the CDF of the binomial distribution for the confidence, which is tighter (for extreme probabilities) than the DKWM inequality. The method is known as the [Clopper-Pearson method] (https://en.wikipedia.org/wiki/Binomial_proportion_confidence_interval). Assumes: - The given samples were drawn iid from the distribution of interest. - The given distribution is a Bernoulli, i.e., supported only on low and high. Guarantees: - The probability (over the randomness of drawing the given sample) that the true mean is outside the returned interval is no more than the given error_rate. Args: samples: `np.ndarray` of samples drawn iid from the distribution of interest. error_rate: Python `float` admissible rate of mistakes. Returns: low: Lower bound of confidence interval. high: Upper bound of confidence interval. Raises: ValueError: If `samples` has rank other than 1 (batch semantics are not implemented), or if `samples` contains values other than `low` or `high` (as that makes the distribution not Bernoulli).
def get_version_path(self, version=None):
    '''
    Returns a storage path for the archive and version

    If the archive is versioned, the version number is used as the file
    path and the archive path is the directory. If not, the archive path
    is used as the file path.

    Parameters
    ----------
    version : str or object
        Version number to use as file name on versioned archives (default
        latest unless ``default_version`` set)

    Examples
    --------

    .. code-block:: python

        >>> arch = DataArchive(None, 'arch', None, 'a1', versioned=False)
        >>> print(arch.get_version_path())
        a1
        >>>
        >>> ver = DataArchive(None, 'ver', None, 'a2', versioned=True)
        >>> print(ver.get_version_path('0.0.0'))
        a2/0.0
        >>>
        >>> print(ver.get_version_path('0.0.1a1'))
        a2/0.0.1a1
        >>>
        >>> print(ver.get_version_path('latest')) # doctest: +ELLIPSIS
        Traceback (most recent call last):
        ...
        AttributeError: 'NoneType' object has no attribute 'manager'
    '''
    version = _process_version(self, version)

    if self.versioned:
        return fs.path.join(self.archive_path, str(version))
    else:
        return self.archive_path
Returns a storage path for the archive and version If the archive is versioned, the version number is used as the file path and the archive path is the directory. If not, the archive path is used as the file path. Parameters ---------- version : str or object Version number to use as file name on versioned archives (default latest unless ``default_version`` set) Examples -------- .. code-block:: python >>> arch = DataArchive(None, 'arch', None, 'a1', versioned=False) >>> print(arch.get_version_path()) a1 >>> >>> ver = DataArchive(None, 'ver', None, 'a2', versioned=True) >>> print(ver.get_version_path('0.0.0')) a2/0.0 >>> >>> print(ver.get_version_path('0.0.1a1')) a2/0.0.1a1 >>> >>> print(ver.get_version_path('latest')) # doctest: +ELLIPSIS Traceback (most recent call last): ... AttributeError: 'NoneType' object has no attribute 'manager'
def results(self, use_cache=True, dialect=None, billing_tier=None):
    """Retrieves table of results for the query. May block if the query must be executed first.

    Args:
      use_cache: whether to use cached results or not. Ignored if append is specified.
      dialect : {'legacy', 'standard'}, default 'legacy'
          'legacy' : Use BigQuery's legacy SQL dialect.
          'standard' : Use BigQuery's standard SQL (beta), which is
          compliant with the SQL 2011 standard.
      billing_tier: Limits the billing tier for this job. Queries that have resource
          usage beyond this tier will fail (without incurring a charge). If unspecified, this
          will be set to your project default. This can also be used to override your
          project-wide default billing tier on a per-query basis.

    Returns:
      A QueryResultsTable containing the result set.

    Raises:
      Exception if the query could not be executed or query response was malformed.
    """
    if not use_cache or (self._results is None):
        self.execute(use_cache=use_cache, dialect=dialect, billing_tier=billing_tier)
    return self._results.results
Retrieves table of results for the query. May block if the query must be executed first. Args: use_cache: whether to use cached results or not. Ignored if append is specified. dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. 'standard' : Use BigQuery's standard SQL (beta), which is compliant with the SQL 2011 standard. billing_tier: Limits the billing tier for this job. Queries that have resource usage beyond this tier will fail (without incurring a charge). If unspecified, this will be set to your project default. This can also be used to override your project-wide default billing tier on a per-query basis. Returns: A QueryResultsTable containing the result set. Raises: Exception if the query could not be executed or query response was malformed.
def identify(self, req, resp, resource, uri_kwargs):
    """Identify user using Authenticate header with Basic auth."""
    header = req.get_header("Authorization", False)
    auth = header.split(" ") if header else None

    if auth is None or auth[0].lower() != 'basic':
        return None

    if len(auth) != 2:
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "The Authorization header for Basic auth should be in form:\n"
            "Authorization: Basic <base64-user-pass>"
        )

    user_pass = auth[1]
    try:
        decoded = base64.b64decode(user_pass).decode()
    except (TypeError, UnicodeDecodeError, binascii.Error):
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "Credentials for Basic auth not correctly base64 encoded."
        )

    username, _, password = decoded.partition(":")
    return username, password
Identify user using Authenticate header with Basic auth.
def update(self, fields=None, **kwargs):
    """Update the current entity.

    Make an HTTP PUT call to ``self.path('base')``. Return the response.

    :param fields: An iterable of field names. Only the fields named in
        this iterable will be updated. No fields are updated if an empty
        iterable is passed in. All fields are updated if ``None`` is
        passed in.
    :return: A ``requests.response`` object.
    """
    kwargs = kwargs.copy()  # shadow the passed-in kwargs
    kwargs.update(self._server_config.get_client_kwargs())
    # a content upload is always multipart
    headers = kwargs.pop('headers', {})
    headers['content-type'] = 'multipart/form-data'
    kwargs['headers'] = headers
    return client.put(
        self.path('self'),
        fields,
        **kwargs
    )
Update the current entity. Make an HTTP PUT call to ``self.path('base')``. Return the response. :param fields: An iterable of field names. Only the fields named in this iterable will be updated. No fields are updated if an empty iterable is passed in. All fields are updated if ``None`` is passed in. :return: A ``requests.response`` object.
def lowdata_fmt():
    '''
    Validate and format lowdata from incoming unserialized request data

    This tool requires that the hypermedia_in tool has already been run.
    '''
    if cherrypy.request.method.upper() != 'POST':
        return

    data = cherrypy.request.unserialized_data

    # if the data was sent as urlencoded, we need to make it a list.
    # this is a very forgiving implementation as different clients set different
    # headers for form encoded data (including charset or something similar)
    if data and isinstance(data, collections.Mapping):
        # Make the 'arg' param a list if not already
        if 'arg' in data and not isinstance(data['arg'], list):
            data['arg'] = [data['arg']]

        # Finally, make a Low State and put it in request
        cherrypy.request.lowstate = [data]
    else:
        cherrypy.serving.request.lowstate = data
Validate and format lowdata from incoming unserialized request data This tool requires that the hypermedia_in tool has already been run.
def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT):
    """Retrieve the secret at the specified location.

    Supported methods:
        GET: /{mount_point}/data/{path}. Produces: 200 application/json

    :param path: Specifies the path of the secret to read. This is specified as part of the URL.
    :type path: str | unicode
    :param version: Specifies the version to return. If not set the latest version is returned.
    :type version: int
    :param mount_point: The "path" the secret engine was mounted on.
    :type mount_point: str | unicode
    :return: The JSON response of the request.
    :rtype: dict
    """
    params = {}
    if version is not None:
        params['version'] = version
    api_path = '/v1/{mount_point}/data/{path}'.format(mount_point=mount_point, path=path)
    response = self._adapter.get(
        url=api_path,
        params=params,
    )
    return response.json()
Retrieve the secret at the specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies the path of the secret to read. This is specified as part of the URL. :type path: str | unicode :param version: Specifies the version to return. If not set the latest version is returned. :type version: int :param mount_point: The "path" the secret engine was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: dict
def get_segment_definer_comments(xml_file, include_version=True):
    """Returns a dict with the comment column as the value for each segment"""

    from glue.ligolw.ligolw import LIGOLWContentHandler as h
    lsctables.use_in(h)

    # read segment definer table
    xmldoc, _ = ligolw_utils.load_fileobj(xml_file,
                                          gz=xml_file.name.endswith(".gz"),
                                          contenthandler=h)
    seg_def_table = table.get_table(xmldoc, lsctables.SegmentDefTable.tableName)

    # put comment column into a dict
    comment_dict = {}
    for seg_def in seg_def_table:
        if include_version:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name),
                                          str(seg_def.version)])
        else:
            full_channel_name = ':'.join([str(seg_def.ifos),
                                          str(seg_def.name)])
        comment_dict[full_channel_name] = seg_def.comment

    return comment_dict
Returns a dict with the comment column as the value for each segment
def create_token(self, user):
    """
    Create a signed token from a user.
    """
    # The password is expected to be a secure hash but we hash it again
    # for additional safety. We default to MD5 to minimize the length of
    # the token. (Remember, if an attacker obtains the URL, he can already
    # log in. This isn't high security.)
    h = crypto.pbkdf2(
        self.get_revocation_key(user),
        self.salt,
        self.iterations,
        digest=self.digest,
    )
    return self.sign(self.packer.pack_pk(user.pk) + h)
Create a signed token from a user.
def __within2(value, within=None, errmsg=None, dtype=None):
    '''validate that a value is in ``within`` and optionally a ``dtype``'''
    valid, _value = False, value
    if dtype:
        try:
            _value = dtype(value)
            # TODO: this is a bit loose when dtype is a class
            valid = _value in within
        except ValueError:
            pass
    else:
        valid = _value in within
    if errmsg is None:
        if dtype:
            typename = getattr(dtype, '__name__',
                               hasattr(dtype, '__class__')
                               and getattr(dtype.__class__, 'name', dtype))
            errmsg = '{0} within \'{1}\''.format(typename, within)
        else:
            errmsg = 'within \'{0}\''.format(within)
    return (valid, _value, errmsg)
validate that a value is in ``within`` and optionally a ``dtype``
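A small usage sketch for the validator above; the inputs are illustrative, and the double-underscore function is called directly for demonstration.

# Validate that a string parses to an int contained in an allowed set.
valid, value, errmsg = __within2('2', within={1, 2, 3}, dtype=int)
print(valid, value, errmsg)   # True 2 "int within '{1, 2, 3}'"

# A value outside the allowed set fails but still reports a useful message.
valid, value, errmsg = __within2('9', within={1, 2, 3}, dtype=int)
print(valid, errmsg)          # False "int within '{1, 2, 3}'"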
def all_selected_options(self):
    """Returns a list of all selected options belonging to this select tag"""
    ret = []
    for opt in self.options:
        if opt.is_selected():
            ret.append(opt)
    return ret
Returns a list of all selected options belonging to this select tag
def pull_session(session_id=None, url='default', io_loop=None, arguments=None): ''' Create a session by loading the current server-side document. ``session.document`` will be a fresh document loaded from the server. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. If you don't plan to modify ``session.document`` you probably don't need to use this function; instead you can directly ``show_session()`` or ``server_session()`` without downloading the session's document into your process first. It's much more efficient to avoid downloading the session if you don't need to. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``pull_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: session_id (string, optional) : The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop (``tornado.ioloop.IOLoop``, optional) : The ``IOLoop`` to use for the websocket arguments (dict[str, str], optional) : A dictionary of key/values to be passed as HTTP request arguments to Bokeh application code (default: None) Note that should only be provided when pulling new sessions. If ``session_id`` is not None, or a session with ``session_id`` already exists, these arguments will have no effect. Returns: ClientSession : A new ``ClientSession`` connected to the server ''' coords = _SessionCoordinates(session_id=session_id, url=url) session = ClientSession(session_id=session_id, websocket_url=websocket_url_for_server_url(coords.url), io_loop=io_loop, arguments=arguments) session.pull() return session
Create a session by loading the current server-side document. ``session.document`` will be a fresh document loaded from the server. While the connection to the server is open, changes made on the server side will be applied to this document, and changes made on the client side will be synced to the server. If you don't plan to modify ``session.document`` you probably don't need to use this function; instead you can directly ``show_session()`` or ``server_session()`` without downloading the session's document into your process first. It's much more efficient to avoid downloading the session if you don't need to. In a production scenario, the ``session_id`` should be unique for each browser tab, which keeps users from stomping on each other. It's neither scalable nor secure to use predictable session IDs or to share session IDs across users. For a notebook running on a single machine, ``session_id`` could be something human-readable such as ``"default"`` for convenience. If you allow ``pull_session()`` to generate a unique ``session_id``, you can obtain the generated ID with the ``id`` property on the returned ``ClientSession``. Args: session_id (string, optional) : The name of the session, None to autogenerate a random one (default: None) url : (str, optional): The URL to a Bokeh application on a Bokeh server can also be `"default"` which will connect to the default app URL io_loop (``tornado.ioloop.IOLoop``, optional) : The ``IOLoop`` to use for the websocket arguments (dict[str, str], optional) : A dictionary of key/values to be passed as HTTP request arguments to Bokeh application code (default: None) Note that should only be provided when pulling new sessions. If ``session_id`` is not None, or a session with ``session_id`` already exists, these arguments will have no effect. Returns: ClientSession : A new ``ClientSession`` connected to the server
def configure(self, options, conf):
    """Configure the plugin and system, based on selected options.

    The base plugin class sets the plugin to enabled if the enable option
    for the plugin (self.enable_opt) is true.
    """
    self.conf = conf
    if hasattr(options, self.enable_opt):
        self.enabled = getattr(options, self.enable_opt)
Configure the plugin and system, based on selected options. The base plugin class sets the plugin to enabled if the enable option for the plugin (self.enable_opt) is true.
def make_tz_aware(dt, tz='UTC', is_dst=None): """Add timezone information to a datetime object, only if it is naive. >>> make_tz_aware(datetime.datetime(2001, 9, 8, 7, 6)) datetime.datetime(2001, 9, 8, 7, 6, tzinfo=<UTC>) >>> make_tz_aware(['2010-01-01'], 'PST') [datetime.datetime(2010, 1, 1, 0, 0, tzinfo=<DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD>)] >>> make_tz_aware(['1970-10-31', '1970-12-25', '1971-07-04'], 'CDT') # doctest: +NORMALIZE_WHITESPACE [datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware([None, float('nan'), float('inf'), 1980, 1979.25*365.25, '1970-10-31', ... '1970-12-25', '1971-07-04'], ... 'CDT') # doctest: +NORMALIZE_WHITESPACE [None, nan, inf, datetime.datetime(6, 6, 3, 0, 0, tzinfo=<DstTzInfo 'US/Central' LMT-1 day, 18:09:00 STD>), datetime.datetime(1980, 4, 16, 1, 30, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware(datetime.time(22, 23, 59, 123456)) datetime.time(22, 23, 59, 123456, tzinfo=<UTC>) >>> make_tz_aware(datetime.time(22, 23, 59, 123456), 'PDT', is_dst=True) datetime.time(22, 23, 59, 123456, tzinfo=<DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>) """ # make sure dt is a datetime, time, or list of datetime/times dt = make_datetime(dt) if not isinstance(dt, (list, datetime.datetime, datetime.date, datetime.time, pd.Timestamp)): return dt # TODO: deal with sequence of timezones try: tz = dt.tzinfo or tz except (ValueError, AttributeError, TypeError): pass try: tzstr = str(tz).strip().upper() if tzstr in TZ_ABBREV_NAME: is_dst = is_dst or tzstr.endswith('DT') tz = TZ_ABBREV_NAME.get(tzstr, tz) except (ValueError, AttributeError, TypeError): pass try: tz = pytz.timezone(tz) except (ValueError, AttributeError, TypeError): # from traceback import print_exc # print_exc() pass try: return tz.localize(dt, is_dst=is_dst) except (ValueError, AttributeError, TypeError): # from traceback import print_exc # print_exc() # TypeError: unsupported operand type(s) for +: 'datetime.time' and 'datetime.timedelta' pass # could be datetime.time, which can't be localized. Insted `replace` the TZ # don't try/except in case dt is not a datetime or time type -- should raise an exception if not isinstance(dt, list): return dt.replace(tzinfo=tz) return [make_tz_aware(dt0, tz=tz, is_dst=is_dst) for dt0 in dt]
Add timezone information to a datetime object, only if it is naive. >>> make_tz_aware(datetime.datetime(2001, 9, 8, 7, 6)) datetime.datetime(2001, 9, 8, 7, 6, tzinfo=<UTC>) >>> make_tz_aware(['2010-01-01'], 'PST') [datetime.datetime(2010, 1, 1, 0, 0, tzinfo=<DstTzInfo 'US/Pacific' PST-1 day, 16:00:00 STD>)] >>> make_tz_aware(['1970-10-31', '1970-12-25', '1971-07-04'], 'CDT') # doctest: +NORMALIZE_WHITESPACE [datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware([None, float('nan'), float('inf'), 1980, 1979.25*365.25, '1970-10-31', ... '1970-12-25', '1971-07-04'], ... 'CDT') # doctest: +NORMALIZE_WHITESPACE [None, nan, inf, datetime.datetime(6, 6, 3, 0, 0, tzinfo=<DstTzInfo 'US/Central' LMT-1 day, 18:09:00 STD>), datetime.datetime(1980, 4, 16, 1, 30, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 10, 31, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1970, 12, 25, 0, 0, tzinfo=<DstTzInfo 'US/Central' CST-1 day, 18:00:00 STD>), datetime.datetime(1971, 7, 4, 0, 0, tzinfo=<DstTzInfo 'US/Central' CDT-1 day, 19:00:00 DST>)] >>> make_tz_aware(datetime.time(22, 23, 59, 123456)) datetime.time(22, 23, 59, 123456, tzinfo=<UTC>) >>> make_tz_aware(datetime.time(22, 23, 59, 123456), 'PDT', is_dst=True) datetime.time(22, 23, 59, 123456, tzinfo=<DstTzInfo 'US/Pacific' LMT-1 day, 16:07:00 STD>)
def _domain_event_pmsuspend_cb(conn, domain, reason, opaque):
    '''
    Domain suspend events handler
    '''
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], {
        'reason': 'unknown'  # currently unused
    })
Domain suspend events handler
def _get_subject_public_key(cert):
    """
    Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo
    field of the server's certificate. This is used in the server
    verification steps to thwart MitM attacks.

    :param cert: X509 certificate from pyOpenSSL .get_peer_certificate()
    :return: byte string of the asn.1 DER encoded SubjectPublicKey field
    """
    public_key = cert.get_pubkey()
    cryptographic_key = public_key.to_cryptography_key()
    subject_public_key = cryptographic_key.public_bytes(Encoding.DER,
                                                        PublicFormat.PKCS1)
    return subject_public_key
Returns the SubjectPublicKey asn.1 field of the SubjectPublicKeyInfo field of the server's certificate. This is used in the server verification steps to thwart MitM attacks. :param cert: X509 certificate from pyOpenSSL .get_peer_certificate() :return: byte string of the asn.1 DER encoded SubjectPublicKey field
def nunique(expr):
    """
    The distinct count.

    :param expr:
    :return:
    """
    output_type = types.int64
    if isinstance(expr, SequenceExpr):
        return NUnique(_value_type=output_type, _inputs=[expr])
    elif isinstance(expr, SequenceGroupBy):
        return GroupedNUnique(_data_type=output_type,
                              _inputs=[expr.to_column()], _grouped=expr.input)
    elif isinstance(expr, CollectionExpr):
        unique_input = _extract_unique_input(expr)
        if unique_input:
            return nunique(unique_input)
        else:
            return NUnique(_value_type=types.int64, _inputs=expr._project_fields)
    elif isinstance(expr, GroupBy):
        if expr._to_agg:
            inputs = expr.input[expr._to_agg.names]._project_fields
        else:
            inputs = expr.input._project_fields
        return GroupedNUnique(_data_type=types.int64,
                              _inputs=inputs, _grouped=expr)
The distinct count. :param expr: :return:
def get_child(self, streamId, childId, options={}):
    """Get the child of a stream."""
    return self.get('stream/' + streamId + '/children/' + childId, options)
Get the child of a stream.
def get_top_n_meanings(strings, n):
    """ Returns (text, score) for top n strings """
    scored_strings = [(s, score_meaning(s)) for s in strings]
    scored_strings.sort(key=lambda tup: -tup[1])
    return scored_strings[:n]
Returns (text, score) for top n strings
def get_resource_metadata(self, resource=None):
    """
    Get resource metadata

    :param resource: The name of the resource to get metadata for
    :return: list
    """
    result = self._make_metadata_request(meta_id=0, metadata_type='METADATA-RESOURCE')
    if resource:
        result = next((item for item in result if item['ResourceID'] == resource), None)
    return result
Get resource metadata :param resource: The name of the resource to get metadata for :return: list
def screener(molecules, ensemble, sort_order):
    """
    Uses the virtual screening scores for the receptors, or queries, specified in
    ensemble to sort the given molecules in the direction specified by sort_order.

    :param molecules: a list of molecule objects (/classification/molecules.Molecules())
    :param ensemble: a tuple with receptors, or a query, that specifies an ensemble
    :param sort_order: 'asc' or 'dsc'. 'asc' sorts in ascending order (binding energy estimates)
                       'dsc' sorts in descending order (similarity scores, or binding probabilities)
    :return:
    """
    # screen
    modified_molecules = []
    for index in range(len(molecules)):
        modified_molecules.append(molecules[index])
        scores = []  # [(score, query)]
        for query in ensemble:
            scores.append((molecules[index].GetProp(query), query))
        if sort_order == 'dsc':
            scores.sort(key=lambda x: float(x[0]), reverse=True)
        elif sort_order == 'asc':
            scores.sort(key=lambda x: float(x[0]))
        modified_molecules[index].SetProp('best_score', format(scores[0][0]))
        modified_molecules[index].SetProp('best_query', format(scores[0][1]))

    active = []
    decoy = []
    non_random = []
    for mol in modified_molecules:
        if float(mol.GetProp('best_score')) == 10000.00:
            if mol.GetProp('status') == 1:
                active.append(mol)
            else:
                decoy.append(mol)
        else:
            non_random.append(mol)

    if sort_order == 'dsc':
        non_random.sort(key=lambda mol: float(mol.GetProp('best_score')), reverse=True)
        # random.shuffle(rand)
    elif sort_order == 'asc':
        non_random.sort(key=lambda mol: float(mol.GetProp('best_score')))
        # random.shuffle(rand)

    # append the compounds with scores of 10,000 in the order active, decoy, active, ...
    rand = []
    decoy_length = len(decoy)
    active_length = len(active)
    if decoy_length > active_length:
        for a, d in zip(active, decoy[0:active_length]):
            rand.append(a)
            rand.append(d)
        for d in decoy[active_length:decoy_length]:
            rand.append(d)
    elif decoy_length < active_length:
        for a, d in zip(active[0:decoy_length], decoy):
            rand.append(a)
            rand.append(d)
        for a in active[decoy_length:active_length]:
            rand.append(a)
    elif decoy_length == active_length:
        for a, d in zip(active, decoy):
            rand.append(a)
            rand.append(d)

    modified_molecules = non_random + rand
    return modified_molecules
Uses the virtual screening scores for the receptors, or queries, specified in ensemble to sort the given molecules in the direction specified by sort_order. :param molecules: a list of molecule objects (/classification/molecules.Molecules()) :param ensemble: a tuple with receptors, or a query, that specifies an ensemble :param sort_order: 'asc' or 'dsc'. 'asc' sorts in ascending order (binding energy estimates) 'dsc' sorts in descending order (similarity scores, or binding probabilities) :return:
def get_ssh_key():
    """Returns the ssh key for connecting to the cluster workers.

    If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key
    will be used for syncing across different nodes.
    """
    path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
                          os.path.expanduser("~/ray_bootstrap_key.pem"))
    if os.path.exists(path):
        return path
    return None
Returns the ssh key for connecting to the cluster workers. If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key will be used for syncing across different nodes.
def dump_privatekey(type, pkey, cipher=None, passphrase=None): """ Dump the private key *pkey* into a buffer string encoded with the type *type*. Optionally (if *type* is :const:`FILETYPE_PEM`) encrypting it using *cipher* and *passphrase*. :param type: The file type (one of :const:`FILETYPE_PEM`, :const:`FILETYPE_ASN1`, or :const:`FILETYPE_TEXT`) :param PKey pkey: The PKey to dump :param cipher: (optional) if encrypted PEM format, the cipher to use :param passphrase: (optional) if encrypted PEM format, this can be either the passphrase to use, or a callback for providing the passphrase. :return: The buffer with the dumped key in :rtype: bytes """ bio = _new_mem_buf() if not isinstance(pkey, PKey): raise TypeError("pkey must be a PKey") if cipher is not None: if passphrase is None: raise TypeError( "if a value is given for cipher " "one must also be given for passphrase") cipher_obj = _lib.EVP_get_cipherbyname(_byte_string(cipher)) if cipher_obj == _ffi.NULL: raise ValueError("Invalid cipher name") else: cipher_obj = _ffi.NULL helper = _PassphraseHelper(type, passphrase) if type == FILETYPE_PEM: result_code = _lib.PEM_write_bio_PrivateKey( bio, pkey._pkey, cipher_obj, _ffi.NULL, 0, helper.callback, helper.callback_args) helper.raise_if_problem() elif type == FILETYPE_ASN1: result_code = _lib.i2d_PrivateKey_bio(bio, pkey._pkey) elif type == FILETYPE_TEXT: if _lib.EVP_PKEY_id(pkey._pkey) != _lib.EVP_PKEY_RSA: raise TypeError("Only RSA keys are supported for FILETYPE_TEXT") rsa = _ffi.gc( _lib.EVP_PKEY_get1_RSA(pkey._pkey), _lib.RSA_free ) result_code = _lib.RSA_print(bio, rsa, 0) else: raise ValueError( "type argument must be FILETYPE_PEM, FILETYPE_ASN1, or " "FILETYPE_TEXT") _openssl_assert(result_code != 0) return _bio_to_string(bio)
Dump the private key *pkey* into a buffer string encoded with the type *type*. Optionally (if *type* is :const:`FILETYPE_PEM`) encrypting it using *cipher* and *passphrase*. :param type: The file type (one of :const:`FILETYPE_PEM`, :const:`FILETYPE_ASN1`, or :const:`FILETYPE_TEXT`) :param PKey pkey: The PKey to dump :param cipher: (optional) if encrypted PEM format, the cipher to use :param passphrase: (optional) if encrypted PEM format, this can be either the passphrase to use, or a callback for providing the passphrase. :return: The buffer with the dumped key in :rtype: bytes
def token_middleware(ctx, get_response):
    """Reinject token and consistency into requests.
    """
    async def middleware(request):
        params = request.setdefault('params', {})
        if params.get("token") is None:
            params['token'] = ctx.token
        return await get_response(request)
    return middleware
Reinject token and consistency into requests.
def get_items(self) -> Iterator[StoryItem]:
    """Retrieve all items from a story."""
    yield from (StoryItem(self._context, item, self.owner_profile)
                for item in reversed(self._node['items']))
Retrieve all items from a story.
def crawl(self, urls, name='crawl', api='analyze', **kwargs):
    """Crawlbot API.

    Returns a diffbot.Job object to check and retrieve crawl status.
    """
    # If multiple seed URLs are specified, join with whitespace.
    if isinstance(urls, list):
        urls = ' '.join(urls)
    url = self.endpoint('crawl')
    process_url = self.endpoint(api)
    params = {
        'token': self._token,
        'seeds': urls,
        'name': name,
        'apiUrl': process_url,
    }
    # Add any additional named parameters as accepted by Crawlbot
    params['maxToCrawl'] = 10
    params.update(kwargs)
    self._get(url, params=params)
    return Job(self._token, name, self._version)
Crawlbot API. Returns a diffbot.Job object to check and retrieve crawl status.
def check_uniqueness(self, *args):
    """For a unique index, check if the given args are not used twice

    For the parameters, see BaseIndex.check_uniqueness
    """
    self.get_unique_index().check_uniqueness(*self.prepare_args(args, transform=False))
For a unique index, check if the given args are not used twice For the parameters, see BaseIndex.check_uniqueness
def refresh(self):
    """Re-pulls the data from redis"""
    pipe = self.redis.pipeline()
    pipe.hget(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "metadata")
    pipe.hget(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "choices")
    pipe.hget(EXPERIMENT_REDIS_KEY_TEMPLATE % self.name, "default-choice")
    results = pipe.execute()

    if results[0] is None:
        raise ExperimentException(self.name, "Does not exist")

    self.metadata = parse_json(results[0])
    self.choice_names = parse_json(results[1]) if results[1] is not None else []
    self.default_choice = escape.to_unicode(results[2])
    self._choices = None
Re-pulls the data from redis
def perform_word_selection(self, event=None):
    """
    Performs word selection

    :param event: QMouseEvent
    """
    self.editor.setTextCursor(
        TextHelper(self.editor).word_under_cursor(True))
    if event:
        event.accept()
Performs word selection :param event: QMouseEvent
def _strip_metadata(self, my_dict):
    """
    Create a copy of dict and remove not needed data
    """
    new_dict = copy.deepcopy(my_dict)
    if const.START in new_dict:
        del new_dict[const.START]
    if const.END in new_dict:
        del new_dict[const.END]
    if const.WHITELIST in new_dict:
        del new_dict[const.WHITELIST]
    if const.WHITELIST_START in new_dict:
        del new_dict[const.WHITELIST_START]
    if const.WHITELIST_END in new_dict:
        del new_dict[const.WHITELIST_END]
    return new_dict
Create a copy of dict and remove not needed data
def retrieve(self, request, project, pk=None):
    """
    GET method implementation for a note detail
    """
    try:
        serializer = JobNoteSerializer(JobNote.objects.get(id=pk))
        return Response(serializer.data)
    except JobNote.DoesNotExist:
        return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
GET method implementation for a note detail
def subsc_search(self, article_code, **kwargs):
    '''taobao.vas.subsc.search Export subscription records

    Lets an ISV query the subscription records for the applications and
    paid items registered under its own account.'''
    request = TOPRequest('taobao.vas.subsc.search')
    request['article_code'] = article_code
    for k, v in kwargs.iteritems():
        if k not in ('item_code', 'nick', 'start_deadline', 'end_deadline',
                     'status', 'autosub', 'expire_notice', 'page_size',
                     'page_no') and v is None:
            continue
        request[k] = v
    self.create(self.execute(request),
                fields=['article_subs', 'total_item'],
                models={'article_subs': ArticleSub})
    return self.article_subs
taobao.vas.subsc.search Export subscription records. Lets an ISV query the subscription records for the applications and paid items registered under its own account.
def raw_pitch_accuracy(ref_voicing, ref_cent, est_voicing, est_cent,
                       cent_tolerance=50):
    """Compute the raw pitch accuracy given two pitch (frequency) sequences
    in cents and matching voicing indicator sequences. The first pitch and
    voicing arrays are treated as the reference (truth), and the second two
    as the estimate (prediction). All 4 sequences must be of the same length.

    Examples
    --------
    >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt')
    >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt')
    >>> (ref_v, ref_c,
    ...  est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time,
    ...                                                  ref_freq,
    ...                                                  est_time,
    ...                                                  est_freq)
    >>> raw_pitch = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c,
    ...                                                est_v, est_c)

    Parameters
    ----------
    ref_voicing : np.ndarray
        Reference boolean voicing array
    ref_cent : np.ndarray
        Reference pitch sequence in cents
    est_voicing : np.ndarray
        Estimated boolean voicing array
    est_cent : np.ndarray
        Estimate pitch sequence in cents
    cent_tolerance : float
        Maximum absolute deviation for a cent value to be considered correct
        (Default value = 50)

    Returns
    -------
    raw_pitch : float
        Raw pitch accuracy, the fraction of voiced frames in ref_cent for
        which est_cent provides a correct frequency value (within
        cent_tolerance cents).
    """
    validate_voicing(ref_voicing, est_voicing)
    validate(ref_voicing, ref_cent, est_voicing, est_cent)
    ref_voicing = ref_voicing.astype(bool)
    est_voicing = est_voicing.astype(bool)
    # When input arrays are empty, return 0 by special case
    if ref_voicing.size == 0 or est_voicing.size == 0 \
            or ref_cent.size == 0 or est_cent.size == 0:
        return 0.

    # If there are no voiced frames in reference, metric is 0
    if ref_voicing.sum() == 0:
        return 0.

    # Raw pitch = the number of voiced frames in the reference for which the
    # estimate provides a correct frequency value (within cent_tolerance cents)
    # NB: voicing estimation is ignored in this measure
    matching_voicing = ref_voicing * (est_cent > 0)
    cent_diff = np.abs(ref_cent - est_cent)[matching_voicing]
    frame_correct = (cent_diff < cent_tolerance)
    raw_pitch = (frame_correct).sum() / float(ref_voicing.sum())

    return raw_pitch
Compute the raw pitch accuracy given two pitch (frequency) sequences in cents and matching voicing indicator sequences. The first pitch and voicing arrays are treated as the reference (truth), and the second two as the estimate (prediction). All 4 sequences must be of the same length. Examples -------- >>> ref_time, ref_freq = mir_eval.io.load_time_series('ref.txt') >>> est_time, est_freq = mir_eval.io.load_time_series('est.txt') >>> (ref_v, ref_c, ... est_v, est_c) = mir_eval.melody.to_cent_voicing(ref_time, ... ref_freq, ... est_time, ... est_freq) >>> raw_pitch = mir_eval.melody.raw_pitch_accuracy(ref_v, ref_c, ... est_v, est_c) Parameters ---------- ref_voicing : np.ndarray Reference boolean voicing array ref_cent : np.ndarray Reference pitch sequence in cents est_voicing : np.ndarray Estimated boolean voicing array est_cent : np.ndarray Estimate pitch sequence in cents cent_tolerance : float Maximum absolute deviation for a cent value to be considered correct (Default value = 50) Returns ------- raw_pitch : float Raw pitch accuracy, the fraction of voiced frames in ref_cent for which est_cent provides a correct frequency value (within cent_tolerance cents).
def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data
r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
def endpoint(self, endpoint):
    """Like :meth:`Flask.endpoint` but for a blueprint.  This does not
    prefix the endpoint with the blueprint name, this has to be done
    explicitly by the user of this method.  If the endpoint is prefixed
    with a `.` it will be registered to the current blueprint, otherwise
    it's an application independent endpoint.
    """
    def decorator(f):
        def register_endpoint(state):
            state.app.view_functions[endpoint] = f
        self.record_once(register_endpoint)
        return f
    return decorator
Like :meth:`Flask.endpoint` but for a blueprint. This does not prefix the endpoint with the blueprint name, this has to be done explicitly by the user of this method. If the endpoint is prefixed with a `.` it will be registered to the current blueprint, otherwise it's an application independent endpoint.
def parse_ipv6_literal_host(entity, default_port):
    """Validates an IPv6 literal host:port string.

    Returns a 2-tuple of IPv6 literal followed by port where
    port is default_port if it wasn't specified in entity.

    :Parameters:
        - `entity`: A string that represents an IPv6 literal enclosed
          in braces (e.g. '[::1]' or '[::1]:27017').
        - `default_port`: The port number to use when one wasn't
          specified in entity.
    """
    if entity.find(']') == -1:
        raise ValueError("an IPv6 address literal must be "
                         "enclosed in '[' and ']' according "
                         "to RFC 2732.")
    i = entity.find(']:')
    if i == -1:
        return entity[1:-1], default_port
    return entity[1: i], entity[i + 2:]
Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where port is default_port if it wasn't specified in entity. :Parameters: - `entity`: A string that represents an IPv6 literal enclosed in braces (e.g. '[::1]' or '[::1]:27017'). - `default_port`: The port number to use when one wasn't specified in entity.
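A brief usage sketch of the parser above (values illustrative); note that when the port is parsed out of the entity it comes back as a string.

print(parse_ipv6_literal_host('[::1]', 27017))        # ('::1', 27017)
print(parse_ipv6_literal_host('[::1]:27018', 27017))  # ('::1', '27018')
# A literal without brackets is rejected:
# parse_ipv6_literal_host('::1', 27017)  -> ValueError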
def delete(self, obj):
    """
    Delete an object in CDSTAR and remove it from the catalog.

    :param obj: An object ID or an Object instance.
    """
    obj = self.api.get_object(getattr(obj, 'id', obj))
    obj.delete()
    self.remove(obj.id)
Delete an object in CDSTAR and remove it from the catalog. :param obj: An object ID or an Object instance.
def _get_request_type(self):
    """Find requested request type in POST request."""
    value = self.document.tag.lower()
    if value in allowed_request_types[self.params['service']]:
        self.params["request"] = value
    else:
        raise OWSInvalidParameterValue("Request type %s is not supported" % value,
                                       value="request")
    return self.params["request"]
Find requested request type in POST request.
def repr_args(args):
    """Formats a list of function arguments prettily, but as working code
    (kwargs are tuples of (argname, argvalue)).
    """
    res = []
    for x in args:
        if isinstance(x, tuple) and len(x) == 2:
            key, value = x
            # todo: exclude this key if value is its default
            res += ["%s=%s" % (key, repr_arg(value))]
        else:
            res += [repr_arg(x)]
    return ', '.join(res)
Formats a list of function arguments prettily, but as working code
(kwargs are tuples of (argname, argvalue)).
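A small usage sketch, assuming repr_args and its helper repr_arg (not shown here) are in scope and that repr_arg behaves roughly like repr for plain values:

# Positional values are rendered as-is, 2-tuples become keyword arguments.
print(repr_args([1, 'x', ('timeout', 30)]))
# expected output under the repr-like assumption: 1, 'x', timeout=30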
def all(cls, klass, db_session=None):
    """
    Returns all objects of a specific type - will work correctly with
    sqlalchemy inheritance models. You should normally use the model's
    base_query() instead of this function; it is kept for
    backwards-compatibility purposes.

    :param klass:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session)
    return db_session.query(klass)
Returns all objects of a specific type - will work correctly with
sqlalchemy inheritance models. You should normally use the model's
base_query() instead of this function; it is kept for
backwards-compatibility purposes.

:param klass:
:param db_session:
:return:
def check_enable_mode(self, check_string=""): """Check if in enable mode. Return boolean. :param check_string: Identification of privilege mode from device :type check_string: str """ self.write_channel(self.RETURN) output = self.read_until_prompt() return check_string in output
Check if in enable mode. Return boolean. :param check_string: Identification of privilege mode from device :type check_string: str
def jobs(request): ''' This is the view used by the executor.py scripts for getting / putting the test results. Fetching some file for testing is changing the database, so using GET here is not really RESTish. Whatever. A visible shared secret in the request is no problem, since the executors come from trusted networks. The secret only protects this view from outside foreigners. TODO: Make it a real API, based on some framework. TODO: Factor out state model from this method into some model. POST requests with 'Action'='get_config' are expected to contain the following parameters: 'MachineId', 'Config', 'Secret', 'UUID' All other POST requests are expected to contain the following parameters: 'SubmissionFileId', 'Message', 'ErrorCode', 'Action', 'Secret', 'UUID' GET requests are expected to contain the following parameters: 'Secret', 'UUID' GET reponses deliver the following elements in the header: 'SubmissionFileId', 'Timeout', 'Action', 'PostRunValidation' ''' try: if request.method == 'GET': secret = request.GET['Secret'] uuid = request.GET['UUID'] elif request.method == 'POST': secret = request.POST['Secret'] uuid = request.POST['UUID'] except Exception as e: logger.error( "Error finding the neccessary data in the executor request: " + str(e)) raise PermissionDenied if secret != settings.JOB_EXECUTOR_SECRET: raise PermissionDenied # Update last_contact information for test machine machine, created = TestMachine.objects.update_or_create( host=uuid, defaults={'last_contact': datetime.now()}) if created: # ask for configuration of new execution hosts by returning the according action logger.debug( "Test machine is unknown, creating entry and asking executor for configuration.") response = HttpResponse() response['Action'] = 'get_config' response['APIVersion'] = '1.0.0' # semantic versioning response['MachineId'] = machine.pk return response if not machine.enabled: # Act like no jobs are given for him raise Http404 if request.method == "GET": # Clean up submissions where the answer from the executors took too long pending_submissions = Submission.pending_tests.filter( file_upload__fetched__isnull=False) #logger.debug("%u pending submission(s)"%(len(pending_submissions))) for sub in pending_submissions: max_delay = timedelta( seconds=sub.assignment.attachment_test_timeout) # There is a small chance that meanwhile the result was delivered, so fetched became NULL if sub.file_upload.fetched and sub.file_upload.fetched + max_delay < datetime.now(): logger.debug( "Resetting executor fetch status for submission %u, due to timeout" % sub.pk) # TODO: Late delivery for such a submission by the executor may lead to result overwriting. Check this. sub.clean_fetch_date() if sub.state == Submission.TEST_VALIDITY_PENDING: sub.save_validation_result( machine, "Killed due to non-reaction. Please check your application for deadlocks or keyboard input.", "Killed due to non-reaction on timeout signals.") sub.state = Submission.TEST_VALIDITY_FAILED sub.inform_student(request, sub.state) if sub.state == Submission.TEST_FULL_PENDING: sub.save_fulltest_result( machine, "Killed due to non-reaction on timeout signals. Student not informed, since this was the full test.") sub.state = Submission.TEST_FULL_FAILED sub.save() # Now get an appropriate submission. 
submissions = Submission.pending_tests submissions = submissions.filter(assignment__in=machine.assignments.all()) \ .filter(file_upload__isnull=False) \ .filter(file_upload__fetched__isnull=True) if len(submissions) == 0: # Nothing found to be fetchable #logger.debug("No pending work for executors") raise Http404 else: sub = submissions[0] sub.save_fetch_date() sub.modified = datetime.now() sub.save() # create HTTP response with file download f = sub.file_upload.attachment # on dev server, we sometimes have stale database entries if not os.access(f.path, os.F_OK): mail_managers('Warning: Missing file', 'Missing file on storage for submission file entry %u: %s' % ( sub.file_upload.pk, str(sub.file_upload.attachment)), fail_silently=True) raise Http404 response = HttpResponse(f, content_type='application/binary') response['APIVersion'] = '1.0.0' # semantic versioning response['Content-Disposition'] = 'attachment; filename="%s"' % sub.file_upload.basename() response['SubmissionFileId'] = str(sub.file_upload.pk) response['SubmissionOriginalFilename'] = sub.file_upload.original_filename response['SubmissionId'] = str(sub.pk) response['SubmitterName'] = sub.submitter.get_full_name() response['SubmitterStudentId'] = sub.submitter.profile.student_id response['AuthorNames'] = sub.authors.all() response['SubmitterStudyProgram'] = str(sub.submitter.profile.study_program) response['Course'] = str(sub.assignment.course) response['Assignment'] = str(sub.assignment) response['Timeout'] = sub.assignment.attachment_test_timeout if sub.state == Submission.TEST_VALIDITY_PENDING: response['Action'] = 'test_validity' response['PostRunValidation'] = sub.assignment.validity_test_url(request) elif sub.state == Submission.TEST_FULL_PENDING or sub.state == Submission.CLOSED_TEST_FULL_PENDING: response['Action'] = 'test_full' response['PostRunValidation'] = sub.assignment.full_test_url(request) else: assert (False) logger.debug("Delivering submission %u as new %s job" % (sub.pk, response['Action'])) return response elif request.method == "POST": # first check if this is just configuration data, and not a job result if request.POST['Action'] == 'get_config': machine = TestMachine.objects.get( pk=int(request.POST['MachineId'])) machine.config = request.POST['Config'] machine.save() return HttpResponse(status=201) # executor.py is providing the results as POST parameters sid = request.POST['SubmissionFileId'] submission_file = get_object_or_404(SubmissionFile, pk=sid) sub = submission_file.submissions.all()[0] logger.debug("Storing executor results for submission %u" % (sub.pk)) error_code = int(request.POST['ErrorCode']) # Job state: Waiting for validity test # Possible with + without full test # Possible with + without grading if request.POST['Action'] == 'test_validity' and sub.state == Submission.TEST_VALIDITY_PENDING: sub.save_validation_result( machine, request.POST['Message'], request.POST['MessageTutor']) if error_code == 0: # We have a full test if sub.assignment.attachment_test_full: logger.debug( "Validity test working, setting state to pending full test") sub.state = Submission.TEST_FULL_PENDING # We have no full test else: logger.debug( "Validity test working, setting state to tested") sub.state = Submission.SUBMITTED_TESTED if not sub.assignment.is_graded(): # Assignment is not graded. We are done here. 
sub.state = Submission.CLOSED sub.inform_student(request, Submission.CLOSED) else: logger.debug( "Validity test not working, setting state to failed") sub.state = Submission.TEST_VALIDITY_FAILED sub.inform_student(request, sub.state) # Job state: Waiting for full test # Possible with + without grading elif request.POST['Action'] == 'test_full' and sub.state == Submission.TEST_FULL_PENDING: sub.save_fulltest_result( machine, request.POST['MessageTutor']) if error_code == 0: if sub.assignment.is_graded(): logger.debug("Full test working, setting state to tested (since graded)") sub.state = Submission.SUBMITTED_TESTED else: logger.debug("Full test working, setting state to closed (since not graded)") sub.state = Submission.CLOSED sub.inform_student(request, Submission.CLOSED) else: logger.debug("Full test not working, setting state to failed") sub.state = Submission.TEST_FULL_FAILED # full tests may be performed several times and are meant to be a silent activity # therefore, we send no mail to the student here # Job state: Waiting for full test of already closed jobs ("re-test") # Grading is already done elif request.POST['Action'] == 'test_full' and sub.state == Submission.CLOSED_TEST_FULL_PENDING: logger.debug( "Closed full test done, setting state to closed again") sub.save_fulltest_result( machine, request.POST['MessageTutor']) sub.state = Submission.CLOSED # full tests may be performed several times and are meant to be a silent activity # therefore, we send no mail to the student here elif request.POST['Action'] == 'test_validity' and sub.state == Submission.TEST_VALIDITY_FAILED: # Can happen if the validation is set to failed due to timeout, but the executor delivers the late result. # Happens in reality only with >= 2 executors, since the second one is pulling for new jobs and triggers # the timeout check while the first one is still stucked with the big job. # Can be ignored. logger.debug( "Ignoring executor result, since the submission is already marked as failed.") else: msg = ''' Dear OpenSubmit administrator, the executors returned some result, but this does not fit to the current submission state. This is a strong indication for a bug in OpenSubmit - sorry for that. The system will ignore the report from executor and mark the job as to be repeated. Please report this on the project GitHub page for further investigation. Submission ID: %u Submission File ID reported by the executor: %u Action reported by the executor: %s Current state of the submission: %s (%s) Message from the executor: %s Error code from the executor: %u ''' % (sub.pk, submission_file.pk, request.POST['Action'], sub.state_for_tutors(), sub.state, request.POST['Message'], error_code) mail_managers('Warning: Inconsistent job state', msg, fail_silently=True) # Mark work as done sub.save() sub.clean_fetch_date() return HttpResponse(status=201)
This is the view used by the executor.py scripts for getting / putting the test results.

Fetching some file for testing changes the database, so using GET here is not really RESTish. Whatever.

A visible shared secret in the request is no problem, since the executors come from trusted networks. The secret only protects this view from outsiders.

TODO: Make it a real API, based on some framework.
TODO: Factor out state model from this method into some model.

POST requests with 'Action'='get_config' are expected to contain the following parameters:
'MachineId', 'Config', 'Secret', 'UUID'

All other POST requests are expected to contain the following parameters:
'SubmissionFileId', 'Message', 'ErrorCode', 'Action', 'Secret', 'UUID'

GET requests are expected to contain the following parameters:
'Secret', 'UUID'

GET responses deliver the following elements in the header:
'SubmissionFileId', 'Timeout', 'Action', 'PostRunValidation'
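The protocol described above can be exercised from the executor side with plain HTTP calls. A hedged sketch using the requests library; the URL, secret and UUID below are placeholders, not values from the source:

import requests

BASE_URL = 'https://opensubmit.example.org/jobs/'   # hypothetical mount point of this view
SECRET = 'shared-executor-secret'                    # must equal settings.JOB_EXECUTOR_SECRET
UUID = 'executor-host-1'                             # identifies this test machine

# Ask for work; a 404 means "nothing to do" or "machine disabled".
resp = requests.get(BASE_URL, params={'Secret': SECRET, 'UUID': UUID})
if resp.status_code == 200:
    action = resp.headers['Action']                  # 'test_validity', 'test_full' or 'get_config'
    if action != 'get_config':
        file_id = resp.headers['SubmissionFileId']
        timeout = int(resp.headers['Timeout'])
        # ... run the test within `timeout` seconds, then report back:
        requests.post(BASE_URL, data={
            'Secret': SECRET, 'UUID': UUID, 'Action': action,
            'SubmissionFileId': file_id, 'ErrorCode': 0,
            'Message': 'passed', 'MessageTutor': 'passed',
        })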
def get_correlation(self, t1, t2): """ Computes the correlation coefficient for the specified periods. :param float t1: First period of interest. :param float t2: Second period of interest. :return float rho: The predicted correlation coefficient. """ t_min = min(t1, t2) t_max = max(t1, t2) c1 = 1.0 c1 -= np.cos(np.pi / 2.0 - np.log(t_max / max(t_min, 0.109)) * 0.366) if t_max < 0.2: c2 = 0.105 * (1.0 - 1.0 / (1.0 + np.exp(100.0 * t_max - 5.0))) c2 = 1.0 - c2 * (t_max - t_min) / (t_max - 0.0099) else: c2 = 0 if t_max < 0.109: c3 = c2 else: c3 = c1 c4 = c1 c4 += 0.5 * (np.sqrt(c3) - c3) * (1.0 + np.cos(np.pi * t_min / 0.109)) if t_max <= 0.109: rho = c2 elif t_min > 0.109: rho = c1 elif t_max < 0.2: rho = min(c2, c4) else: rho = c4 return rho
Computes the correlation coefficient for the specified periods. :param float t1: First period of interest. :param float t2: Second period of interest. :return float rho: The predicted correlation coefficient.
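A brief usage sketch, assuming `model` is an instance of the class defining get_correlation above. Because the implementation only depends on min(t1, t2) and max(t1, t2), the coefficient is symmetric in its arguments, and identical periods give a coefficient of 1:

# `model` is an assumption here; it is not constructed in the source.
rho_a = model.get_correlation(0.2, 1.0)
rho_b = model.get_correlation(1.0, 0.2)
assert abs(rho_a - rho_b) < 1e-12                            # symmetric by construction
assert abs(model.get_correlation(0.5, 0.5) - 1.0) < 1e-12    # identical periods are fully correlated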
def update_devices(self, devices): """Update values from response of URL_DEVICES, callback if changed.""" for qspacket in devices: try: qsid = qspacket[QS_ID] except KeyError: _LOGGER.debug("Device without ID: %s", qspacket) continue if qsid not in self: self[qsid] = QSDev(data=qspacket) dev = self[qsid] dev.data = qspacket # Decode value from QSUSB newqs = _legacy_status(qspacket[QS_VALUE]) if dev.is_dimmer: # Adjust dimmer exponentially to get a smoother effect newqs = min(round(math.pow(newqs, self.dim_adj)), 100) newin = round(newqs * _MAX / 100) if abs(dev.value - newin) > 1: # Significant change _LOGGER.debug("%s qs=%s --> %s", qsid, newqs, newin) dev.value = newin self._cb_value_changed(self, qsid, newin)
Update values from response of URL_DEVICES, callback if changed.
def ud_grade_ipix(ipix, nside_in, nside_out, nest=False):
    """
    Upgrade or degrade resolution of a pixel list.

    Parameters:
    -----------
    ipix:array-like
        the input pixel(s)
    nside_in:int
        the nside of the input pixel(s)
    nside_out:int
        the desired nside of the output pixel(s)
    nest:bool
        if True, assume NESTED pixel ordering, otherwise RING

    Returns:
    --------
    pix_out:array-like
        the upgraded or degraded pixel array
    """
    if nside_in == nside_out:
        return ipix
    elif nside_in < nside_out:
        return u_grade_ipix(ipix, nside_in, nside_out, nest)
    elif nside_in > nside_out:
        return d_grade_ipix(ipix, nside_in, nside_out, nest)
Upgrade or degrade resolution of a pixel list.

Parameters:
-----------
ipix:array-like
    the input pixel(s)
nside_in:int
    the nside of the input pixel(s)
nside_out:int
    the desired nside of the output pixel(s)
nest:bool
    if True, assume NESTED pixel ordering, otherwise RING

Returns:
--------
pix_out:array-like
    the upgraded or degraded pixel array
def reconnect(self): """ (Re)establish the gateway connection @return: True if connection was established """ self._converters.clear() self._gateway = None self._xsltFactory = None try: # print("Starting Java gateway on port: %s" % self._gwPort) self._gateway = JavaGateway(GatewayClient(port=self._gwPort)) self._xsltFactory = self._gateway.jvm.org.pyjxslt.XSLTTransformerFactory('') self._refresh_converters() except (socket.error, Py4JNetworkError) as e: print(e) self._gateway = None return False return True
(Re)establish the gateway connection @return: True if connection was established
def requires(self, extras=()): """List of Requirements needed for this distro if `extras` are used""" dm = self._dep_map deps = [] deps.extend(dm.get(None, ())) for ext in extras: try: deps.extend(dm[safe_extra(ext)]) except KeyError: raise UnknownExtra( "%s has no such extra feature %r" % (self, ext) ) return deps
List of Requirements needed for this distro if `extras` are used
def _do_search(conf): ''' Builds connection and search arguments, performs the LDAP search and formats the results as a dictionary appropriate for pillar use. ''' # Build LDAP connection args connargs = {} for name in ['server', 'port', 'tls', 'binddn', 'bindpw', 'anonymous']: connargs[name] = _config(name, conf) if connargs['binddn'] and connargs['bindpw']: connargs['anonymous'] = False # Build search args try: _filter = conf['filter'] except KeyError: raise SaltInvocationError('missing filter') _dn = _config('dn', conf) scope = _config('scope', conf) _lists = _config('lists', conf) or [] _attrs = _config('attrs', conf) or [] _dict_key_attr = _config('dict_key_attr', conf, 'dn') attrs = _lists + _attrs + [_dict_key_attr] if not attrs: attrs = None # Perform the search try: result = __salt__['ldap.search'](_filter, _dn, scope, attrs, **connargs)['results'] except IndexError: # we got no results for this search log.debug('LDAP search returned no results for filter %s', _filter) result = {} except Exception: log.critical( 'Failed to retrieve pillar data from LDAP:\n', exc_info=True ) return {} return result
Builds connection and search arguments, performs the LDAP search and formats the results as a dictionary appropriate for pillar use.
def yellow(cls): "Make the text foreground color yellow." wAttributes = cls._get_text_attributes() wAttributes &= ~win32.FOREGROUND_MASK wAttributes |= win32.FOREGROUND_YELLOW cls._set_text_attributes(wAttributes)
Make the text foreground color yellow.
def _started_channels(self): """Reimplemented to make a history request and load %guiref.""" super(IPythonWidget, self)._started_channels() self._load_guiref_magic() self.kernel_manager.shell_channel.history(hist_access_type='tail', n=1000)
Reimplemented to make a history request and load %guiref.
def _add_study_provenance( self, phenotyping_center, colony, project_fullname, pipeline_name, pipeline_stable_id, procedure_stable_id, procedure_name, parameter_stable_id, parameter_name, statistical_method, resource_name, row_num ): """ :param phenotyping_center: str, from self.files['all'] :param colony: str, from self.files['all'] :param project_fullname: str, from self.files['all'] :param pipeline_name: str, from self.files['all'] :param pipeline_stable_id: str, from self.files['all'] :param procedure_stable_id: str, from self.files['all'] :param procedure_name: str, from self.files['all'] :param parameter_stable_id: str, from self.files['all'] :param parameter_name: str, from self.files['all'] :param statistical_method: str, from self.files['all'] :param resource_name: str, from self.files['all'] :return: study bnode """ provenance_model = Provenance(self.graph) model = Model(self.graph) # Add provenance # A study is a blank node equal to its parts study_bnode = self.make_id("{0}{1}{2}{3}{4}{5}{6}{7}".format( phenotyping_center, colony, project_fullname, pipeline_stable_id, procedure_stable_id, parameter_stable_id, statistical_method, resource_name), '_') model.addIndividualToGraph( study_bnode, None, self.globaltt['study']) # List of nodes linked to study with has_part property study_parts = [] # Add study parts model.addIndividualToGraph(self.resolve(procedure_stable_id), procedure_name) study_parts.append(self.resolve(procedure_stable_id)) study_parts.append(self.resolve(statistical_method)) provenance_model.add_study_parts(study_bnode, study_parts) # Add parameter/measure statement: study measures parameter parameter_label = "{0} ({1})".format(parameter_name, procedure_name) logging.info("Adding Provenance") model.addIndividualToGraph( self.resolve(parameter_stable_id), parameter_label) provenance_model.add_study_measure( study_bnode, self.resolve(parameter_stable_id)) # Add Colony colony_bnode = self.make_id("{0}".format(colony), '_') model.addIndividualToGraph(colony_bnode, colony) # Add study agent model.addIndividualToGraph( self.resolve(phenotyping_center), phenotyping_center, self.globaltt['organization']) # self.graph model.addTriple( study_bnode, self.globaltt['has_agent'], self.resolve(phenotyping_center)) # add pipeline and project model.addIndividualToGraph( self.resolve(pipeline_stable_id), pipeline_name) # self.graph model.addTriple( study_bnode, self.globaltt['part_of'], self.resolve(pipeline_stable_id)) model.addIndividualToGraph( self.resolve(project_fullname), project_fullname, self.globaltt['project']) # self.graph model.addTriple( study_bnode, self.globaltt['part_of'], self.resolve(project_fullname)) return study_bnode
:param phenotyping_center: str, from self.files['all'] :param colony: str, from self.files['all'] :param project_fullname: str, from self.files['all'] :param pipeline_name: str, from self.files['all'] :param pipeline_stable_id: str, from self.files['all'] :param procedure_stable_id: str, from self.files['all'] :param procedure_name: str, from self.files['all'] :param parameter_stable_id: str, from self.files['all'] :param parameter_name: str, from self.files['all'] :param statistical_method: str, from self.files['all'] :param resource_name: str, from self.files['all'] :return: study bnode
def get_agents(self): """Gets all ``Agents``. In plenary mode, the returned list contains all known agents or an error results. Otherwise, the returned list may contain only those agents that are accessible through this session. return: (osid.authentication.AgentList) - a list of ``Agents`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceLookupSession.get_resources # NOTE: This implementation currently ignores plenary view collection = JSONClientValidated('authentication', collection='Agent', runtime=self._runtime) result = collection.find(self._view_filter()).sort('_id', DESCENDING) return objects.AgentList(result, runtime=self._runtime, proxy=self._proxy)
Gets all ``Agents``. In plenary mode, the returned list contains all known agents or an error results. Otherwise, the returned list may contain only those agents that are accessible through this session. return: (osid.authentication.AgentList) - a list of ``Agents`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def astype(self, dtype):
    """Return a copy of this space with new ``dtype``.

    Parameters
    ----------
    dtype :
        Scalar data type of the returned space. Can be provided
        in any way the `numpy.dtype` constructor understands, e.g.
        as built-in type or as a string. Data types with non-trivial
        shapes are not allowed.

    Returns
    -------
    newspace : `TensorSpace`
        Version of this space with given data type.
    """
    if dtype is None:
        # Need to filter this out since Numpy interprets it as 'float'
        raise ValueError('`None` is not a valid data type')

    dtype = np.dtype(dtype)
    if dtype == self.dtype:
        return self

    if is_numeric_dtype(self.dtype):
        # Caching for real and complex versions (exact dtype mappings)
        if dtype == self.__real_dtype:
            if self.__real_space is None:
                self.__real_space = self._astype(dtype)
            return self.__real_space
        elif dtype == self.__complex_dtype:
            if self.__complex_space is None:
                self.__complex_space = self._astype(dtype)
            return self.__complex_space
        else:
            return self._astype(dtype)
    else:
        return self._astype(dtype)
Return a copy of this space with new ``dtype``. Parameters ---------- dtype : Scalar data type of the returned space. Can be provided in any way the `numpy.dtype` constructor understands, e.g. as built-in type or as a string. Data types with non-trivial shapes are not allowed. Returns ------- newspace : `TensorSpace` Version of this space with given data type.
def _set_src_vtep_ip(self, v, load=False): """ Setter method for src_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/src_vtep_ip (inet:ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_src_vtep_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_src_vtep_ip() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip-host", parent=self, choice=(u'choice-src-vtep-ip', u'case-src-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: A.B.C.D', u'alt-name': u'src-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """src_vtep_ip must be of a type compatible with inet:ipv4-address""", 'defined-type': "inet:ipv4-address", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="src-vtep-ip", rest_name="src-vtep-ip-host", parent=self, choice=(u'choice-src-vtep-ip', u'case-src-vtep-ip'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'src vtep ip address: A.B.C.D', u'alt-name': u'src-vtep-ip-host', u'cli-incomplete-command': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vxlan-visibility', defining_module='brocade-vxlan-visibility', yang_type='inet:ipv4-address', is_config=True)""", }) self.__src_vtep_ip = t if hasattr(self, '_set'): self._set()
Setter method for src_vtep_ip, mapped from YANG variable /overlay/access_list/type/vxlan/standard/seq/src_vtep_ip (inet:ipv4-address) If this variable is read-only (config: false) in the source YANG file, then _set_src_vtep_ip is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_src_vtep_ip() directly.
def _authenticate(self): """Authenticate user and generate token.""" self.cleanup_headers() url = LOGIN_ENDPOINT data = self.query( url, method='POST', extra_params={ 'email': self.__username, 'password': self.__password }) if isinstance(data, dict) and data.get('success'): data = data.get('data') self.authenticated = data.get('authenticated') self.country_code = data.get('countryCode') self.date_created = data.get('dateCreated') self.__token = data.get('token') self.userid = data.get('userId') # update header with the generated token self.__headers['Authorization'] = self.__token
Authenticate user and generate token.
def check_subscriber_key_length(app_configs=None, **kwargs): """ Check that DJSTRIPE_SUBSCRIBER_CUSTOMER_KEY fits in metadata. Docs: https://stripe.com/docs/api#metadata """ from . import settings as djstripe_settings messages = [] key = djstripe_settings.SUBSCRIBER_CUSTOMER_KEY key_size = len(str(key)) if key and key_size > 40: messages.append( checks.Error( "DJSTRIPE_SUBSCRIBER_CUSTOMER_KEY must be no more than 40 characters long", hint="Current value: %r (%i characters)" % (key, key_size), id="djstripe.E001", ) ) return messages
Check that DJSTRIPE_SUBSCRIBER_CUSTOMER_KEY fits in metadata. Docs: https://stripe.com/docs/api#metadata
def pandasdfsummarytojson(df, ndigits=3):
    """ Convert the result of a Pandas describe operation into a JSON-serializable dict.

    Parameters
    ----------
    df : The result of a Pandas describe operation.
    ndigits : int, optional - The number of significant digits to round to.

    Returns
    -------
    A json object which captures the describe. Keys are field names and
    values are dictionaries with all of the indexes returned by the Pandas
    describe.
    """
    df = df.transpose()
    return {k: _pandassummarytojson(v, ndigits) for k, v in df.iterrows()}
Convert the result of a Pandas describe operation into a JSON-serializable dict.

Parameters
----------
df : The result of a Pandas describe operation.
ndigits : int, optional - The number of significant digits to round to.

Returns
-------
A json object which captures the describe. Keys are field names and
values are dictionaries with all of the indexes returned by the Pandas
describe.
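A short usage sketch, assuming the helper _pandassummarytojson (not shown here) rounds each row of the transposed describe output; the keys of the result are the original column names:

import pandas as pd

frame = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
summary = pandasdfsummarytojson(frame.describe(), ndigits=2)
# Each column becomes a key; its value holds count/mean/std/min/25%/50%/75%/max.
# e.g. summary['a']['mean'] -> 2.0 and summary['b']['max'] -> 30.0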
def all(cls, sort=None, limit=None): """Returns all objects of this type. Alias for where() (without filter arguments). See `where` for documentation on the `sort` and `limit` parameters. """ return cls.where(sort=sort, limit=limit)
Returns all objects of this type. Alias for where() (without filter arguments). See `where` for documentation on the `sort` and `limit` parameters.
def parse_config(data: dict) -> dict: """Parse MIP config file. Args: data (dict): raw YAML input from MIP analysis config file Returns: dict: parsed data """ return { 'email': data.get('email'), 'family': data['family_id'], 'samples': [{ 'id': sample_id, 'type': analysis_type, } for sample_id, analysis_type in data['analysis_type'].items()], 'config_path': data['config_file_analysis'], 'is_dryrun': True if 'dry_run_all' in data else False, 'log_path': data['log_file'], 'out_dir': data['outdata_dir'], 'priority': data['slurm_quality_of_service'], 'sampleinfo_path': data['sample_info_file'], }
Parse MIP config file. Args: data (dict): raw YAML input from MIP analysis config file Returns: dict: parsed data
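A minimal round-trip sketch with a hand-written input dict; all paths and identifiers below are made-up placeholders, not values from a real MIP run:

raw = {
    'email': 'clinician@example.com',
    'family_id': 'F0001',
    'analysis_type': {'sample_1': 'wgs'},
    'config_file_analysis': '/analyses/F0001/config.yaml',
    'log_file': '/analyses/F0001/mip.log',
    'outdata_dir': '/analyses/F0001/output',
    'slurm_quality_of_service': 'normal',
    'sample_info_file': '/analyses/F0001/sample_info.yaml',
}
parsed = parse_config(raw)
assert parsed['samples'] == [{'id': 'sample_1', 'type': 'wgs'}]
assert parsed['is_dryrun'] is False      # no 'dry_run_all' key in the input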
def get_tasks(self): """ Get the tasks attached to the instance Returns ------- list List of tasks (:class:`asyncio.Task`) """ tasks = self._get_tasks() tasks.extend(self._streams.get_tasks(self)) return tasks
Get the tasks attached to the instance Returns ------- list List of tasks (:class:`asyncio.Task`)
def diffuser_conical(Di1, Di2, l=None, angle=None, fd=None, Re=None, roughness=0.0, method='Rennels'): r'''Returns the loss coefficient for any conical pipe diffuser. This calculation has four methods available. The 'Rennels' [1]_ formulas are as follows (three different formulas are used, depending on the angle and the ratio of diameters): For 0 to 20 degrees, all aspect ratios: .. math:: K_1 = 8.30[\tan(\alpha/2)]^{1.75}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 20 to 60 degrees, beta < 0.5: .. math:: K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5} - 0.170 - 3.28(0.0625-\beta^4)\sqrt{\frac{\alpha-20^\circ}{40^\circ}}\right\} (1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 20 to 60 degrees, beta >= 0.5: .. math:: K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5} - 0.170 \right\}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 60 to 180 degrees, beta < 0.5: .. math:: K_1 = \left[1.205 - 3.28(0.0625-\beta^4)-12.8\beta^6\sqrt{\frac {\alpha-60^\circ}{120^\circ}}\right](1-\beta^2)^2 For 60 to 180 degrees, beta >= 0.5: .. math:: K_1 = \left[1.205 - 0.20\sqrt{\frac{\alpha-60^\circ}{120^\circ}} \right](1-\beta^2)^2 The Swamee [5]_ formula is: .. math:: K = \left\{\frac{0.25}{\theta^3}\left[1 + \frac{0.6}{r^{1.67}} \left(\frac{\pi-\theta}{\theta} \right) \right]^{0.533r - 2.6} \right\}^{-0.5} .. figure:: fittings/diffuser_conical.png :scale: 60 % :alt: diffuser conical; after [1]_ Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] l : float, optional Length of the contraction along the pipe axis, optional, [m] angle : float, optional Angle of contraction, [degrees] fd : float, optional Darcy friction factor [-] Re : float, optional Reynolds number of the pipe (used in Rennels method only if no friction factor given), [m] roughness : float, optional Roughness of bend wall (used in Rennel method if no friction factor given), [m] method : str The method to use for the calculation; one of 'Rennels', 'Crane', 'Miller', 'Swamee', or 'Idelchik' [-] Returns ------- K : float Loss coefficient with respect to smaller, upstream diameter [-] Notes ----- The Miller method changes around quite a bit. There is quite a bit of variance in the predictions of the methods, as demonstrated by the following figure. .. plot:: plots/diffuser_conical.py Examples -------- >>> diffuser_conical(Di1=1/3., Di2=1.0, angle=50.0, Re=1E6) 0.8027721093415322 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya Treniya). National technical information Service, 1966. .. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. .. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply Pipe Networks. John Wiley & Sons, 2008. .. [5] Miller, Donald S. Internal Flow Systems: Design and Performance Prediction. Gulf Publishing Company, 1990. 
    '''
    beta = Di1/Di2
    beta2 = beta*beta
    if angle is not None:
        angle_rad = radians(angle)
        l = (Di2 - Di1)/(2.0*tan(0.5*angle_rad))
    elif l is not None:
        angle_rad = 2.0*atan(0.5*(Di2-Di1)/l)
        angle = degrees(angle_rad)
    else:
        raise Exception('Either `l` or `angle` must be specified')

    if method is None:
        method = 'Rennels'
    if method == 'Rennels':
        if fd is None:
            if Re is None:
                raise ValueError("The `Rennels` method requires either a "
                                 "specified friction factor or `Re`")
            fd = Colebrook(Re=Re, eD=roughness/Di2, tol=-1)
        if 0.0 < angle <= 20.0:
            K = 8.30*tan(0.5*angle_rad)**1.75*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 20.0 < angle <= 60.0 and 0.0 <= beta < 0.5:
            K = (1.366*sin(2.0*pi*(angle - 15.0)/180.)**0.5 - 0.170
                 - 3.28*(0.0625-beta**4)*(0.025*(angle-20.0))**0.5)*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 20.0 < angle <= 60.0 and beta >= 0.5:
            K = (1.366*sin(2.0*pi*(angle - 15.0)/180.0)**0.5 - 0.170)*(1.0 - beta2)**2 + 0.125*fd*(1.0 - beta2*beta2)/sin(0.5*angle_rad)
        elif 60.0 < angle <= 180.0 and 0.0 <= beta < 0.5:
            beta4 = beta2*beta2
            K = (1.205 - 3.28*(0.0625 - beta4) - 12.8*beta4*beta2*((angle - 60.0)/120.)**0.5)*(1.0 - beta2)**2
        elif 60.0 < angle <= 180.0 and beta >= 0.5:
            K = (1.205 - 0.20*((angle - 60.0)/120.)**0.5)*(1.0 - beta**2)**2
        else:
            raise Exception('Conical diffuser inputs incorrect')
        return K
    elif method == 'Crane':
        return diffuser_conical_Crane(Di1=Di1, Di2=Di2, l=l, angle=angle)
    elif method == 'Miller':
        A_ratio = 1.0/beta2
        if A_ratio > 4.0:
            A_ratio = 4.0
        elif A_ratio < 1.1:
            A_ratio = 1.1

        l_R1_ratio = l/(0.5*Di1)
        if l_R1_ratio < 0.1:
            l_R1_ratio = 0.1
        elif l_R1_ratio > 20.0:
            l_R1_ratio = 20.0
        Kd = max(float(bisplev(log(l_R1_ratio), log(A_ratio), tck_diffuser_conical_Miller)), 0)
        return Kd
    elif method == 'Idelchik':
        A_ratio = beta2
        # Angles 0 to 20, ratios 0.05 to 0.6
        if angle > 20.0:
            angle_fric = 20.0
        elif angle < 2.0:
            angle_fric = 2.0
        else:
            angle_fric = angle

        A_ratio_fric = A_ratio
        if A_ratio_fric < 0.05:
            A_ratio_fric = 0.05
        elif A_ratio_fric > 0.6:
            A_ratio_fric = 0.6

        K_fr = float(contraction_conical_frction_Idelchik_obj(angle_fric, A_ratio_fric))
        K_exp = float(diffuser_conical_Idelchik_obj(min(0.6, A_ratio), max(3.0, angle)))
        return K_fr + K_exp
    elif method == 'Swamee':
        # Really starting to think Swamee uses a different definition of loss coefficient!
        r = Di2/Di1
        K = (0.25*angle_rad**-3*(1.0 + 0.6*r**(-1.67)*(pi-angle_rad)/angle_rad)**(0.533*r - 2.6))**-0.5
        return K
    else:
        raise ValueError('Specified method not recognized; methods are %s'
                         %(diffuser_conical_methods))
r'''Returns the loss coefficient for any conical pipe diffuser. This calculation has four methods available. The 'Rennels' [1]_ formulas are as follows (three different formulas are used, depending on the angle and the ratio of diameters): For 0 to 20 degrees, all aspect ratios: .. math:: K_1 = 8.30[\tan(\alpha/2)]^{1.75}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 20 to 60 degrees, beta < 0.5: .. math:: K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5} - 0.170 - 3.28(0.0625-\beta^4)\sqrt{\frac{\alpha-20^\circ}{40^\circ}}\right\} (1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 20 to 60 degrees, beta >= 0.5: .. math:: K_1 = \left\{1.366\sin\left[\frac{2\pi(\alpha-15^\circ)}{180}\right]^{0.5} - 0.170 \right\}(1-\beta^2)^2 + \frac{f(1-\beta^4)}{8\sin(\alpha/2)} For 60 to 180 degrees, beta < 0.5: .. math:: K_1 = \left[1.205 - 3.28(0.0625-\beta^4)-12.8\beta^6\sqrt{\frac {\alpha-60^\circ}{120^\circ}}\right](1-\beta^2)^2 For 60 to 180 degrees, beta >= 0.5: .. math:: K_1 = \left[1.205 - 0.20\sqrt{\frac{\alpha-60^\circ}{120^\circ}} \right](1-\beta^2)^2 The Swamee [5]_ formula is: .. math:: K = \left\{\frac{0.25}{\theta^3}\left[1 + \frac{0.6}{r^{1.67}} \left(\frac{\pi-\theta}{\theta} \right) \right]^{0.533r - 2.6} \right\}^{-0.5} .. figure:: fittings/diffuser_conical.png :scale: 60 % :alt: diffuser conical; after [1]_ Parameters ---------- Di1 : float Inside diameter of original pipe (smaller), [m] Di2 : float Inside diameter of following pipe (larger), [m] l : float, optional Length of the contraction along the pipe axis, optional, [m] angle : float, optional Angle of contraction, [degrees] fd : float, optional Darcy friction factor [-] Re : float, optional Reynolds number of the pipe (used in Rennels method only if no friction factor given), [m] roughness : float, optional Roughness of bend wall (used in Rennel method if no friction factor given), [m] method : str The method to use for the calculation; one of 'Rennels', 'Crane', 'Miller', 'Swamee', or 'Idelchik' [-] Returns ------- K : float Loss coefficient with respect to smaller, upstream diameter [-] Notes ----- The Miller method changes around quite a bit. There is quite a bit of variance in the predictions of the methods, as demonstrated by the following figure. .. plot:: plots/diffuser_conical.py Examples -------- >>> diffuser_conical(Di1=1/3., Di2=1.0, angle=50.0, Re=1E6) 0.8027721093415322 References ---------- .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012. .. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of Local Resistance and of Friction (Spravochnik Po Gidravlicheskim Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya Treniya). National technical information Service, 1966. .. [3] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. .. [4] Swamee, Prabhata K., and Ashok K. Sharma. Design of Water Supply Pipe Networks. John Wiley & Sons, 2008. .. [5] Miller, Donald S. Internal Flow Systems: Design and Performance Prediction. Gulf Publishing Company, 1990.
def print_err(*args, **kwargs): """ A wrapper for print() that uses stderr by default. """ if kwargs.get('file', None) is None: kwargs['file'] = sys.stderr color = dict_pop_or(kwargs, 'color', True) # Use color if asked, but only if the file is a tty. if color and kwargs['file'].isatty(): # Keep any Colr args passed, convert strs into Colrs. msg = kwargs.get('sep', ' ').join( str(a) if isinstance(a, C) else str(C(a, 'red')) for a in args ) else: # The file is not a tty anyway, no escape codes. msg = kwargs.get('sep', ' ').join( str(a.stripped() if isinstance(a, C) else a) for a in args ) newline = dict_pop_or(kwargs, 'newline', False) if newline: msg = '\n{}'.format(msg) print(msg, **kwargs)
A wrapper for print() that uses stderr by default.
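A couple of hedged call examples, assuming the colr-based helpers used above (C and dict_pop_or) are importable from the same module:

print_err('could not open file:', '/tmp/missing.txt')   # red on a tty, plain otherwise
print_err('second attempt failed', newline=True)        # prefixed with a blank line
print_err('machine readable', color=False)              # never colorized, still goes to stderr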
def get_as(self, cls: Type[MaybeBytesT]) -> Sequence[MaybeBytesT]: """Return the list of parsed objects.""" _ = cls # noqa return cast(Sequence[MaybeBytesT], self.items)
Return the list of parsed objects.
def optionally_with_args(phase, **kwargs): """Apply only the args that the phase knows. If the phase has a **kwargs-style argument, it counts as knowing all args. Args: phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or iterable of those, the phase or phase group (or iterable) to apply with_args to. **kwargs: arguments to apply to the phase. Returns: phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated args. """ if isinstance(phase, PhaseGroup): return phase.with_args(**kwargs) if isinstance(phase, collections.Iterable): return [optionally_with_args(p, **kwargs) for p in phase] if not isinstance(phase, phase_descriptor.PhaseDescriptor): phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(phase) return phase.with_known_args(**kwargs)
Apply only the args that the phase knows. If the phase has a **kwargs-style argument, it counts as knowing all args. Args: phase: phase_descriptor.PhaseDescriptor or PhaseGroup or callable, or iterable of those, the phase or phase group (or iterable) to apply with_args to. **kwargs: arguments to apply to the phase. Returns: phase_descriptor.PhaseDescriptor or PhaseGroup or iterable with the updated args.
def fix(csvfile): '''Apply a fix (ie. remove plain names)''' header('Apply fixes from {}', csvfile.name) bads = [] reader = csv.reader(csvfile) reader.next() # Skip header for id, _, sources, dests in reader: advice = Advice.objects.get(id=id) sources = [s.strip() for s in sources.split(',') if s.strip()] dests = [d.strip() for d in dests.split(',') if d.strip()] if not len(sources) == len(dests): bads.append(id) continue for source, dest in zip(sources, dests): echo('{0}: Replace {1} with {2}', white(id), white(source), white(dest)) advice.subject = advice.subject.replace(source, dest) advice.content = advice.content.replace(source, dest) advice.save() index(advice) for id in bads: echo('{0}: Replacements length not matching', white(id)) success('Done')
Apply a fix (ie. remove plain names)
def outgoing_caller_ids(self): """ Access the outgoing_caller_ids :returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList """ if self._outgoing_caller_ids is None: self._outgoing_caller_ids = OutgoingCallerIdList(self._version, account_sid=self._solution['sid'], ) return self._outgoing_caller_ids
Access the outgoing_caller_ids :returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList :rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
def get_autoscaling_group_properties(asg_client, env, service):
    """
    Gets the autoscaling group properties based on the service name that is provided. This function
    will attempt to find the autoscaling group based on the following logic:
    1. If the service name provided matches the autoscaling group name
    2. If the service name provided matches the Name tag of the autoscaling group
    3. If the service name provided does not match the above, return None

    Args:
        asg_client: Instantiated boto3 autoscaling client
        env: Name of the environment to search for the autoscaling group
        service: Name of the service

    Returns:
        JSON object of the autoscaling group properties if it exists
    """
    try:
        # See if {{ENV}}-{{SERVICE}} matches ASG name
        response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=["{}-{}".format(env, service)])
        if len(response["AutoScalingGroups"]) == 0:
            # See if {{ENV}}-{{SERVICE}} matches ASG tag name
            response = asg_client.describe_tags(Filters=[{
                "Name": "Key",
                "Values": ["Name"]
            }, {
                "Name": "Value",
                "Values": ["{}-{}".format(env, service)]}])
            if len(response["Tags"]) == 0:
                # Query does not match either of the above, return None
                return None
            else:
                asg_name = response["Tags"][0]["ResourceId"]
                response = asg_client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])
                return response["AutoScalingGroups"]
        else:
            return response["AutoScalingGroups"]
    except ClientError as error:
        raise RuntimeError("Error in finding autoscaling group {} {}".format(env, service), error)
Gets the autoscaling group properties based on the service name that is provided. This function
will attempt to find the autoscaling group based on the following logic:
1. If the service name provided matches the autoscaling group name
2. If the service name provided matches the Name tag of the autoscaling group
3. If the service name provided does not match the above, return None

Args:
    asg_client: Instantiated boto3 autoscaling client
    env: Name of the environment to search for the autoscaling group
    service: Name of the service

Returns:
    JSON object of the autoscaling group properties if it exists
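A hedged usage sketch with boto3; the region, environment and service names below are placeholders:

import boto3

asg_client = boto3.client('autoscaling', region_name='us-east-1')
groups = get_autoscaling_group_properties(asg_client, 'prod', 'web')
if groups is None:
    print('no autoscaling group found for prod-web')
else:
    # Either lookup path returns the raw DescribeAutoScalingGroups payload.
    print(groups[0]['AutoScalingGroupName'], groups[0]['DesiredCapacity'])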
def mode_number(self, rows: List[Row], column: NumberColumn) -> Number:
    """
    Takes a list of rows and a column and returns the most frequent value under
    that column in those rows.
    """
    most_frequent_list = self._get_most_frequent_values(rows, column)
    if not most_frequent_list:
        return 0.0  # type: ignore
    most_frequent_value = most_frequent_list[0]
    if not isinstance(most_frequent_value, Number):
        raise ExecutionError(f"Invalid value for mode_number: {most_frequent_value}")
    return most_frequent_value
Takes a list of rows and a column and returns the most frequent value under that column in those rows.
def _conflicted_data_points(L): """Returns an indicator vector where ith element = 1 if x_i is labeled by at least two LFs that give it disagreeing labels.""" m = sparse.diags(np.ravel(L.max(axis=1).todense())) return np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense())
Returns an indicator vector where ith element = 1 if x_i is labeled by at least two LFs that give it disagreeing labels.
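A small worked example, assuming _conflicted_data_points is in scope: rows are data points, columns are labeling functions, and 0 means abstain, so only the first row receives two disagreeing labels:

import numpy as np
from scipy import sparse

L = sparse.csr_matrix(np.array([
    [1, 2],   # two LFs disagree  -> conflicted
    [1, 1],   # two LFs agree     -> not conflicted
    [0, 2],   # only one LF votes -> not conflicted
]))
print(_conflicted_data_points(L))   # expected: [1 0 0]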
def check_schema_coverage(doc, schema): ''' FORWARD CHECK OF DOCUMENT This routine looks at each element in the doc, and makes sure there is a matching 'name' in the schema at that level. ''' error_list = [] to_delete = [] for entry in doc.list_tuples(): (name, value, index, seq) = entry temp_schema = schema_match_up(doc, schema) if not name in temp_schema.list_values("name"): error_list.append( ("[error]", "doc", seq, "a name of '{}' not found in schema".format(name)) ) to_delete.append(seq) else: # check subs el = check_schema_coverage(doc[name, value, index], temp_schema["name", name]) error_list.extend(el) for seq in to_delete: doc.seq_delete(seq) return error_list
FORWARD CHECK OF DOCUMENT This routine looks at each element in the doc, and makes sure there is a matching 'name' in the schema at that level.
def _write_passphrase(stream, passphrase, encoding): """Write the passphrase from memory to the GnuPG process' stdin. :type stream: file, :class:`~io.BytesIO`, or :class:`~io.StringIO` :param stream: The input file descriptor to write the password to. :param str passphrase: The passphrase for the secret key material. :param str encoding: The data encoding expected by GnuPG. Usually, this is ``sys.getfilesystemencoding()``. """ passphrase = '%s\n' % passphrase passphrase = passphrase.encode(encoding) stream.write(passphrase) log.debug("Wrote passphrase on stdin.")
Write the passphrase from memory to the GnuPG process' stdin. :type stream: file, :class:`~io.BytesIO`, or :class:`~io.StringIO` :param stream: The input file descriptor to write the password to. :param str passphrase: The passphrase for the secret key material. :param str encoding: The data encoding expected by GnuPG. Usually, this is ``sys.getfilesystemencoding()``.
def autoprefixer(input, **kw): """Run autoprefixer""" cmd = '%s -b "%s" %s' % (current_app.config.get('AUTOPREFIXER_BIN'), current_app.config.get('AUTOPREFIXER_BROWSERS'), input) subprocess.call(cmd, shell=True)
Run autoprefixer
def _parse_nicknameinuse(client, command, actor, args): """Parse a NICKNAMEINUSE message and dispatch an event. The parameter passed along with the event is the nickname which is already in use. """ nick, _, _ = args.rpartition(" ") client.dispatch_event("NICKNAMEINUSE", nick)
Parse a NICKNAMEINUSE message and dispatch an event. The parameter passed along with the event is the nickname which is already in use.
def assign_objective_requisite(self, objective_id, requisite_objective_id): """Creates a requirement dependency between two ``Objectives``. arg: objective_id (osid.id.Id): the ``Id`` of the dependent ``Objective`` arg: requisite_objective_id (osid.id.Id): the ``Id`` of the required ``Objective`` raise: AlreadyExists - ``objective_id`` already mapped to ``requisite_objective_id`` raise: NotFound - ``objective_id`` or ``requisite_objective_id`` not found raise: NullArgument - ``objective_id`` or ``requisite_objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ requisite_type = Type(**Relationship().get_type_data('OBJECTIVE.REQUISITE')) ras = self._get_provider_manager( 'RELATIONSHIP').get_relationship_admin_session_for_family( self.get_objective_bank_id(), proxy=self._proxy) rfc = ras.get_relationship_form_for_create(objective_id, requisite_objective_id, []) rfc.set_display_name('Objective Requisite') rfc.set_description('An Objective Requisite created by the ObjectiveRequisiteAssignmentSession') rfc.set_genus_type(requisite_type) ras.create_relationship(rfc)
Creates a requirement dependency between two ``Objectives``. arg: objective_id (osid.id.Id): the ``Id`` of the dependent ``Objective`` arg: requisite_objective_id (osid.id.Id): the ``Id`` of the required ``Objective`` raise: AlreadyExists - ``objective_id`` already mapped to ``requisite_objective_id`` raise: NotFound - ``objective_id`` or ``requisite_objective_id`` not found raise: NullArgument - ``objective_id`` or ``requisite_objective_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def write_byte(self, address, value): """Writes the byte to unaddressed register in a device. """ LOGGER.debug("Writing byte %s to device %s!", bin(value), hex(address)) return self.driver.write_byte(address, value)
Writes the byte to unaddressed register in a device.
def restore_state(self, state): """Restore the current state of this emulated object. Args: state (dict): A previously dumped state produced by dump_state. """ super(ReferenceController, self).restore_state(state) state_name = state.get('state_name') state_version = state.get('state_version') if state_name != self.STATE_NAME or state_version != self.STATE_VERSION: raise ArgumentError("Invalid emulated device state name or version", found=(state_name, state_version), expected=(self.STATE_NAME, self.STATE_VERSION)) self.app_info = state.get('app_info', (0, "0.0")) self.os_info = state.get('os_info', (0, "0.0")) # Notify all subsystems of our intent to restore in case they need to prepare self.sensor_log.prepare_for_restore() # Restore all of the subsystems self.remote_bridge.restore(state.get('remote_bridge', {})) self.tile_manager.restore(state.get('tile_manager', {})) self.config_database.restore(state.get('config_database', {})) self.sensor_log.restore(state.get('sensor_log', {}))
Restore the current state of this emulated object. Args: state (dict): A previously dumped state produced by dump_state.
def parse(self, data): # type: (bytes) -> None ''' Parse the passed in data into a UDF ICB Tag. Parameters: data - The data to parse. Returns: Nothing. ''' if self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF ICB Tag already initialized') (self.prior_num_direct_entries, self.strategy_type, self.strategy_param, self.max_num_entries, reserved, self.file_type, self.parent_icb_log_block_num, self.parent_icb_part_ref_num, self.flags) = struct.unpack_from(self.FMT, data, 0) if self.strategy_type not in (4, 4096): raise pycdlibexception.PyCdlibInvalidISO('UDF ICB Tag invalid strategy type') if reserved != 0: raise pycdlibexception.PyCdlibInvalidISO('UDF ICB Tag reserved not 0') self._initialized = True
Parse the passed in data into a UDF ICB Tag. Parameters: data - The data to parse. Returns: Nothing.
def handle_cf_error(error_pointer): """ Checks a CFErrorRef and throws an exception if there is an error to report :param error_pointer: A CFErrorRef :raises: OSError - when the CFErrorRef contains an error """ if is_null(error_pointer): return error = unwrap(error_pointer) if is_null(error): return cf_string_domain = CoreFoundation.CFErrorGetDomain(error) domain = CFHelpers.cf_string_to_unicode(cf_string_domain) CoreFoundation.CFRelease(cf_string_domain) num = CoreFoundation.CFErrorGetCode(error) cf_string_ref = CoreFoundation.CFErrorCopyDescription(error) output = CFHelpers.cf_string_to_unicode(cf_string_ref) CoreFoundation.CFRelease(cf_string_ref) if output is None: if domain == 'NSOSStatusErrorDomain': code_map = { -2147416010: 'ACL add failed', -2147416025: 'ACL base certs not supported', -2147416019: 'ACL challenge callback failed', -2147416015: 'ACL change failed', -2147416012: 'ACL delete failed', -2147416017: 'ACL entry tag not found', -2147416011: 'ACL replace failed', -2147416021: 'ACL subject type not supported', -2147415789: 'Algid mismatch', -2147415726: 'Already logged in', -2147415040: 'Apple add application ACL subject', -2147415036: 'Apple invalid key end date', -2147415037: 'Apple invalid key start date', -2147415039: 'Apple public key incomplete', -2147415038: 'Apple signature mismatch', -2147415034: 'Apple SSLv2 rollback', -2147415802: 'Attach handle busy', -2147415731: 'Block size mismatch', -2147415722: 'Crypto data callback failed', -2147415804: 'Device error', -2147415835: 'Device failed', -2147415803: 'Device memory error', -2147415836: 'Device reset', -2147415728: 'Device verify failed', -2147416054: 'Function failed', -2147416057: 'Function not implemented', -2147415807: 'Input length error', -2147415837: 'Insufficient client identification', -2147416063: 'Internal error', -2147416027: 'Invalid access credentials', -2147416026: 'Invalid ACL base certs', -2147416020: 'Invalid ACL challenge callback', -2147416016: 'Invalid ACL edit mode', -2147416018: 'Invalid ACL entry tag', -2147416022: 'Invalid ACL subject value', -2147415759: 'Invalid algorithm', -2147415678: 'Invalid attr access credentials', -2147415704: 'Invalid attr alg params', -2147415686: 'Invalid attr base', -2147415738: 'Invalid attr block size', -2147415680: 'Invalid attr dl db handle', -2147415696: 'Invalid attr effective bits', -2147415692: 'Invalid attr end date', -2147415752: 'Invalid attr init vector', -2147415682: 'Invalid attr iteration count', -2147415754: 'Invalid attr key', -2147415740: 'Invalid attr key length', -2147415700: 'Invalid attr key type', -2147415702: 'Invalid attr label', -2147415698: 'Invalid attr mode', -2147415708: 'Invalid attr output size', -2147415748: 'Invalid attr padding', -2147415742: 'Invalid attr passphrase', -2147415688: 'Invalid attr prime', -2147415674: 'Invalid attr private key format', -2147415676: 'Invalid attr public key format', -2147415746: 'Invalid attr random', -2147415706: 'Invalid attr rounds', -2147415750: 'Invalid attr salt', -2147415744: 'Invalid attr seed', -2147415694: 'Invalid attr start date', -2147415684: 'Invalid attr subprime', -2147415672: 'Invalid attr symmetric key format', -2147415690: 'Invalid attr version', -2147415670: 'Invalid attr wrapped key format', -2147415760: 'Invalid context', -2147416000: 'Invalid context handle', -2147415976: 'Invalid crypto data', -2147415994: 'Invalid data', -2147415768: 'Invalid data count', -2147415723: 'Invalid digest algorithm', -2147416059: 'Invalid input pointer', -2147415766: 'Invalid input vector', 
-2147415792: 'Invalid key', -2147415780: 'Invalid keyattr mask', -2147415782: 'Invalid keyusage mask', -2147415790: 'Invalid key class', -2147415776: 'Invalid key format', -2147415778: 'Invalid key label', -2147415783: 'Invalid key pointer', -2147415791: 'Invalid key reference', -2147415727: 'Invalid login name', -2147416014: 'Invalid new ACL entry', -2147416013: 'Invalid new ACL owner', -2147416058: 'Invalid output pointer', -2147415765: 'Invalid output vector', -2147415978: 'Invalid passthrough id', -2147416060: 'Invalid pointer', -2147416024: 'Invalid sample value', -2147415733: 'Invalid signature', -2147415787: 'Key blob type incorrect', -2147415786: 'Key header inconsistent', -2147415724: 'Key label already exists', -2147415788: 'Key usage incorrect', -2147416061: 'Mds error', -2147416062: 'Memory error', -2147415677: 'Missing attr access credentials', -2147415703: 'Missing attr alg params', -2147415685: 'Missing attr base', -2147415737: 'Missing attr block size', -2147415679: 'Missing attr dl db handle', -2147415695: 'Missing attr effective bits', -2147415691: 'Missing attr end date', -2147415751: 'Missing attr init vector', -2147415681: 'Missing attr iteration count', -2147415753: 'Missing attr key', -2147415739: 'Missing attr key length', -2147415699: 'Missing attr key type', -2147415701: 'Missing attr label', -2147415697: 'Missing attr mode', -2147415707: 'Missing attr output size', -2147415747: 'Missing attr padding', -2147415741: 'Missing attr passphrase', -2147415687: 'Missing attr prime', -2147415673: 'Missing attr private key format', -2147415675: 'Missing attr public key format', -2147415745: 'Missing attr random', -2147415705: 'Missing attr rounds', -2147415749: 'Missing attr salt', -2147415743: 'Missing attr seed', -2147415693: 'Missing attr start date', -2147415683: 'Missing attr subprime', -2147415671: 'Missing attr symmetric key format', -2147415689: 'Missing attr version', -2147415669: 'Missing attr wrapped key format', -2147415801: 'Not logged in', -2147415840: 'No user interaction', -2147416029: 'Object ACL not supported', -2147416028: 'Object ACL required', -2147416030: 'Object manip auth denied', -2147416031: 'Object use auth denied', -2147416032: 'Operation auth denied', -2147416055: 'OS access denied', -2147415806: 'Output length error', -2147415725: 'Private key already exists', -2147415730: 'Private key not found', -2147415989: 'Privilege not granted', -2147415805: 'Privilege not supported', -2147415729: 'Public key inconsistent', -2147415732: 'Query size unknown', -2147416023: 'Sample value not supported', -2147416056: 'Self check failed', -2147415838: 'Service not available', -2147415736: 'Staged operation in progress', -2147415735: 'Staged operation not started', -2147415779: 'Unsupported keyattr mask', -2147415781: 'Unsupported keyusage mask', -2147415785: 'Unsupported key format', -2147415777: 'Unsupported key label', -2147415784: 'Unsupported key size', -2147415839: 'User canceled', -2147415767: 'Vector of bufs unsupported', -2147415734: 'Verify failed', } if num in code_map: output = code_map[num] if not output: output = '%s %s' % (domain, num) raise OSError(output)
Checks a CFErrorRef and throws an exception if there is an error to report

:param error_pointer:
    A CFErrorRef

:raises:
    OSError - when the CFErrorRef contains an error
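A minimal usage sketch of the pattern this helper supports, assuming the same ffi helpers (`new`, `unwrap`, `is_null`) and the `CoreFoundation` binding are importable from the surrounding module; `some_security_call` is a hypothetical stand-in for any API that reports failures through a CFErrorRef out-parameter.

# Hypothetical sketch: call a CoreFoundation/Security API that writes into a
# CFErrorRef out-parameter, then let handle_cf_error() raise a readable OSError.
error_pointer = new(CoreFoundation, 'CFErrorRef *')   # assumed ffi helper

# some_security_call(...) stands in for any function taking a CFErrorRef *
result = some_security_call(input_ref, error_pointer)

# Raises OSError with the CFError description, or a friendly message mapped
# from the NSOSStatusErrorDomain code table when no description is available.
handle_cf_error(error_pointer)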
def list_models(self, limit=-1, offset=-1):
    """Get a list of models in the registry.

    Parameters
    ----------
    limit : int
        Limit number of items in the result set
    offset : int
        Set offset in list (order as defined by object store)

    Returns
    -------
    list(ModelHandle)
    """
    return self.registry.list_models(limit=limit, offset=offset)
Get a list of models in the registry.

Parameters
----------
limit : int
    Limit number of items in the result set
offset : int
    Set offset in list (order as defined by object store)

Returns
-------
list(ModelHandle)
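A short usage sketch, assuming `registry` is an instance of the class exposing this method; the `identifier` attribute on the returned handles is an assumption for illustration.

# Page through the registry 20 handles at a time using limit/offset.
page_size = 20
offset = 0
while True:
    handles = registry.list_models(limit=page_size, offset=offset)
    if not handles:
        break
    for handle in handles:
        print(handle.identifier)   # attribute name is an assumption
    offset += page_size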
def disconnect(self, signal=None, slot=None, transform=None, condition=None):
    """Removes connection(s) between this object's signal and connected slot(s)

    signal: the signal this class will emit, to cause the slot method to be called
    slot: the slot method or function to call
    transform: an optional value override to pass into the slot method as the first variable
    condition: only call the slot method if the value emitted matches this condition
    """
    if slot:
        self.connections[signal][condition].pop(slot, None)
    elif condition is not None:
        self.connections[signal].pop(condition, None)
    elif signal:
        self.connections.pop(signal, None)
    else:
        delattr(self, 'connections')
Removes connection(s) between this object's signal and connected slot(s)

signal: the signal this class will emit, to cause the slot method to be called
slot: the slot method or function to call
transform: an optional value override to pass into the slot method as the first variable
condition: only call the slot method if the value emitted matches this condition
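A sketch of the four call shapes and what each removes, assuming a companion connect() method has already populated `emitter.connections`; `emitter`, `widget`, and the signal/condition names are illustrative.

emitter.disconnect('value_changed', slot=widget.refresh)   # one slot for one signal
emitter.disconnect('value_changed', condition='error')     # one condition bucket
emitter.disconnect('value_changed')                        # every slot for the signal
emitter.disconnect()                                       # drop all connections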
def attach_bytes(key, the_bytes):
    """Adds a ModuleAttachment to the current graph.

    Args:
      key: A string with the unique key of the attachment.
      the_bytes: A bytes object with the serialized attachment.
    """
    tf_v1.add_to_collection(
        _ATTACHMENT_COLLECTION_INTERNAL,
        module_attachment_pb2.ModuleAttachment(key=key, value=the_bytes))
Adds a ModuleAttachment to the current graph.

Args:
  key: A string with the unique key of the attachment.
  the_bytes: A bytes object with the serialized attachment.
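A hedged sketch of how this might be called while a TF-Hub module graph is being built, assuming attach_bytes is importable from this module; the attachment key and payload are illustrative.

import tensorflow.compat.v1 as tf_v1
import tensorflow_hub as hub

def module_fn():
    x = tf_v1.placeholder(tf_v1.float32, shape=[None, 3])
    y = tf_v1.layers.dense(x, 2)
    hub.add_signature(inputs=x, outputs=y)
    # Attach an arbitrary serialized blob to the module being exported.
    attach_bytes('vocab_checksum', b'\xde\xad\xbe\xef')

spec = hub.create_module_spec(module_fn)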
def setCustomColorRamp(self, colors=[], interpolatedPoints=10):
    """
    Accepts a list of RGB tuples and interpolates between them to create
    a custom color ramp. The generated ramp is stored on this instance as
    a list of RGB tuples.
    """
    self._colorRamp = ColorRampGenerator.generateCustomColorRamp(colors, interpolatedPoints)
Accepts a list of RGB tuples and interpolates between them to create a custom
color ramp. The generated ramp is stored on this instance as a list of RGB tuples.
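A small usage sketch, assuming `renderer` is an instance of the class exposing this method; the color stops and step count are illustrative.

# Blend from blue through white to red, with 25 interpolated steps
# inserted between each pair of stops.
stops = [(0, 0, 255), (255, 255, 255), (255, 0, 0)]
renderer.setCustomColorRamp(colors=stops, interpolatedPoints=25)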
def from_utf8(buf, errors='replace'):
    """
    Decodes a UTF-8 encoded (ASCII-compatible) byte string into a unicode object.

    `buf` string or unicode string to convert.

    Returns a ``unicode`` string.

    * Raises a ``UnicodeDecodeError`` exception if decoding failed and
      `errors` isn't set to 'replace'.
    """
    if isinstance(buf, unicode):
        return buf
    else:
        return unicode(buf, 'utf-8', errors)
Decodes a UTF-8 encoded (ASCII-compatible) byte string into a unicode object.

`buf` string or unicode string to convert.

Returns a ``unicode`` string.

* Raises a ``UnicodeDecodeError`` exception if decoding failed and `errors`
  isn't set to 'replace'.
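A few representative calls under Python 2 semantics (where `unicode` exists as a builtin): byte strings are decoded, unicode passes through unchanged, and undecodable bytes become U+FFFD because `errors` defaults to 'replace'.

from_utf8(b'caf\xc3\xa9')      # -> u'caf\xe9'
from_utf8(u'already unicode')  # -> returned unchanged
from_utf8(b'caf\xff')          # -> u'caf\ufffd' (no exception raised)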
def pause(self, instance_id, keep_provisioned=True):
    """Shuts down the instance without destroying it.

    The AbstractCloudProvider class uses 'stop' to refer to destroying
    a VM, so use 'pause' to mean powering it down while leaving it
    allocated.

    :param str instance_id: instance identifier

    :return: None
    """
    try:
        if self._paused:
            log.debug("node %s is already paused", instance_id)
            return
        self._paused = True
        post_shutdown_action = 'Stopped' if keep_provisioned else \
            'StoppedDeallocated'
        result = self._subscription._sms.shutdown_role(
            service_name=self._cloud_service._name,
            deployment_name=self._cloud_service._name,
            role_name=self._qualified_name,
            post_shutdown_action=post_shutdown_action)
        self._subscription._wait_result(result)
    except Exception as exc:
        log.error("error pausing instance %s: %s", instance_id, exc)
        raise
    log.debug('paused instance(instance_id=%s)', instance_id)
Shuts down the instance without destroying it.

The AbstractCloudProvider class uses 'stop' to refer to destroying a VM,
so use 'pause' to mean powering it down while leaving it allocated.

:param str instance_id: instance identifier

:return: None
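A hedged usage sketch, assuming `provider` is an instance of this Azure-backed provider class and the node names are illustrative.

# Power a node down but keep its provisioning so it can be started again later.
provider.pause('compute001', keep_provisioned=True)

# keep_provisioned=False maps to the 'StoppedDeallocated' shutdown action,
# which also releases the compute allocation on Azure.
provider.pause('compute002', keep_provisioned=False)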
def xmlrpc_provision(self, app_id, path_to_cert_or_cert, environment, timeout=15):
    """Starts an APNSService for this app_id and keeps it running

    Arguments:
        app_id                the app_id to provision for APNS
        path_to_cert_or_cert  absolute path to the APNS SSL cert or a
                              string containing the .pem file
        environment           either 'sandbox' or 'production'
        timeout               seconds to timeout connection attempts to
                              the APNS server

    Returns:
        None
    """
    if environment not in ('sandbox', 'production'):
        raise xmlrpc.Fault(401, 'Invalid environment provided `%s`. Valid '
                                'environments are `sandbox` and `production`' % (
                                environment,))
    if app_id not in self.app_ids:
        # log.msg('provisioning ' + app_id + ' environment ' + environment)
        self.app_ids[app_id] = APNSService(path_to_cert_or_cert, environment, timeout)
Starts an APNSService for this app_id and keeps it running

Arguments:
    app_id                the app_id to provision for APNS
    path_to_cert_or_cert  absolute path to the APNS SSL cert or a string
                          containing the .pem file
    environment           either 'sandbox' or 'production'
    timeout               seconds to timeout connection attempts to the
                          APNS server

Returns:
    None
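A hedged client-side sketch: Twisted exposes `xmlrpc_`-prefixed methods without the prefix, so a caller would invoke `provision`. The host, port, app id, and certificate path are assumptions.

import xmlrpclib  # Python 2; use xmlrpc.client on Python 3

server = xmlrpclib.ServerProxy('http://localhost:7077/')
server.provision('com.example.myapp',
                 '/etc/apns/myapp-sandbox.pem',
                 'sandbox',
                 15)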
def read_json(self, params=None):
    """Get information about the current entity.

    Call :meth:`read_raw`. Check the response status code, decode JSON
    and return the decoded JSON as a dict.

    :return: A dict. The server's response, with all JSON decoded.
    :raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
        4XX or 5XX status code.
    :raises: ``ValueError`` If the response JSON can not be decoded.
    """
    response = self.read_raw(params=params)
    response.raise_for_status()
    return response.json()
Get information about the current entity.

Call :meth:`read_raw`. Check the response status code, decode JSON and
return the decoded JSON as a dict.

:return: A dict. The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
    4XX or 5XX status code.
:raises: ``ValueError`` If the response JSON can not be decoded.
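A hedged usage sketch, assuming `entity` is an instance of the class exposing this method and that the server honors the query parameters shown; the parameter names are illustrative.

import requests

try:
    info = entity.read_json(params={'fields': 'name,id'})
    print(info.get('name'), info.get('id'))
except requests.exceptions.HTTPError as err:
    print('server rejected the read:', err)
except ValueError:
    print('response body was not valid JSON')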
def asyncPipeRegex(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator that asynchronously replaces text in items using regexes.
    Each has the general format: "In [field] replace [match] with [replace]".
    Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : asyncPipe like object (twisted Deferred iterable of items)
    conf : {
        'RULE': [
            {
                'field': {'value': <'search field'>},
                'match': {'value': <'regex'>},
                'replace': {'value': <'replacement'>},
                'globalmatch': {'value': '1'},
                'singlelinematch': {'value': '2'},
                'multilinematch': {'value': '4'},
                'casematch': {'value': '8'}
            }
        ]
    }

    Returns
    -------
    _OUTPUT : twisted.internet.defer.Deferred generator of items
    """
    splits = yield asyncGetSplits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
    asyncConvert = partial(maybeDeferred, convert_func)
    asyncFuncs = get_async_dispatch_funcs('pass', asyncConvert)
    parsed = yield asyncDispatch(splits, *asyncFuncs)
    _OUTPUT = yield maybeDeferred(parse_results, parsed)
    returnValue(iter(_OUTPUT))
An operator that asynchronously replaces text in items using regexes. Each has
the general format: "In [field] replace [match] with [replace]". Not loopable.

Parameters
----------
context : pipe2py.Context object
_INPUT : asyncPipe like object (twisted Deferred iterable of items)
conf : {
    'RULE': [
        {
            'field': {'value': <'search field'>},
            'match': {'value': <'regex'>},
            'replace': {'value': <'replacement'>},
            'globalmatch': {'value': '1'},
            'singlelinematch': {'value': '2'},
            'multilinematch': {'value': '4'},
            'casematch': {'value': '8'}
        }
    ]
}

Returns
-------
_OUTPUT : twisted.internet.defer.Deferred generator of items
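A hedged sketch of a RULE configuration and a Twisted driver; whether _INPUT may be a plain Deferred of item dicts, and which of the optional match-flag keys can be omitted, are assumptions about pipe2py's conventions rather than documented behavior.

from twisted.internet import defer

conf = {
    'RULE': [{
        'field': {'value': 'title'},
        'match': {'value': 'color'},
        'replace': {'value': 'colour'},
    }]
}

@defer.inlineCallbacks
def demo():
    # _INPUT: a Deferred that fires with an iterable of item dicts (assumed shape).
    items = defer.succeed([{'title': 'a color study'}])
    output = yield asyncPipeRegex(_INPUT=items, conf=conf)
    for item in output:
        print(item)   # 'color' replaced with 'colour' in the 'title' field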
def handle_sketch_version(msg):
    """Process an internal sketch version message."""
    if not msg.gateway.is_sensor(msg.node_id):
        return None
    msg.gateway.sensors[msg.node_id].sketch_version = msg.payload
    msg.gateway.alert(msg)
    return None
Process an internal sketch version message.
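A hedged sketch of the handler's effect; `msg` stands for an already-decoded internal I_SKETCH_VERSION message from a pymysensors-style gateway, and the attribute access mirrors only what the handler itself touches.

# No-op if msg.node_id is not a known sensor; otherwise the payload is
# recorded on the sensor and the gateway is alerted.
handle_sketch_version(msg)
print(msg.gateway.sensors[msg.node_id].sketch_version)  # now holds msg.payload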
def read_config(config_fname=None):
    """Parse input configuration file and return a config dict."""
    if not config_fname:
        config_fname = DEFAULT_CONFIG_FNAME

    try:
        with open(config_fname, 'r') as config_file:
            # safe_load avoids arbitrary object construction and the
            # mandatory Loader argument of newer PyYAML releases
            config = yaml.safe_load(config_file)
    except IOError as exc:
        if exc.errno == errno.ENOENT:
            print('payu: warning: Configuration file {0} not found!'
                  .format(config_fname))
            config = {}
        else:
            raise

    collate_config = config.pop('collate', {})

    # Transform legacy collate config options
    if isinstance(collate_config, bool):
        collate_config = {'enable': collate_config}

    collatestr = 'collate_'
    foundkeys = []
    # Cycle through old collate config and convert to newer dict format
    for key in list(config.keys()):
        if key.startswith(collatestr):
            foundkeys.append(key)
            collate_config[key[len(collatestr):]] = config.pop(key)
    if foundkeys:
        print("Use of these keys is deprecated: {}.".format(
            ", ".join(foundkeys)))
        print("Instead use collate dictionary and subkey "
              "without 'collate_' prefix")

    config['collate'] = collate_config

    return config
Parse input configuration file and return a config dict.
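A small sketch of the legacy-key transformation this function performs; the specific subkeys (queue, walltime) are illustrative, the folding behavior follows directly from the code above.

# Given a config.yaml containing the legacy flat keys
#
#   collate: true
#   collate_queue: copyq
#   collate_walltime: '0:30:00'
#
# read_config() folds them into a single 'collate' dictionary:
config = read_config('config.yaml')
print(config['collate'])
# -> {'enable': True, 'queue': 'copyq', 'walltime': '0:30:00'}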