id_within_dataset: int64 (46 – 2.71M) | snippet: stringlengths (63 – 481k) | tokens: sequencelengths (20 – 15.6k) | language: stringclasses (2 values) | nl: stringlengths (1 – 32.4k) | is_duplicated: bool (2 classes)
---|---|---|---|---|---|
1,615,516 | def date_range(cls,start_time,end_time,freq):
'''
Returns a new SArray that represents a fixed frequency datetime index.
Parameters
----------
start_time : datetime.datetime
Left bound for generating dates.
end_time : datetime.datetime
Right bound for generating dates.
freq : datetime.timedelta
Fixed frequency between two consecutive data points.
Returns
-------
out : SArray
Examples
--------
>>> import datetime as dt
>>> start = dt.datetime(2013, 5, 7, 10, 4, 10)
>>> end = dt.datetime(2013, 5, 10, 10, 4, 10)
>>> sa = tc.SArray.date_range(start,end,dt.timedelta(1))
>>> print(sa)
dtype: datetime
Rows: 4
[datetime.datetime(2013, 5, 7, 10, 4, 10),
datetime.datetime(2013, 5, 8, 10, 4, 10),
datetime.datetime(2013, 5, 9, 10, 4, 10),
datetime.datetime(2013, 5, 10, 10, 4, 10)]
'''
if not isinstance(start_time,datetime.datetime):
raise TypeError("The ``start_time`` argument must be of type datetime.datetime.")
if not isinstance(end_time,datetime.datetime):
raise TypeError("The ``end_time`` argument must be of type datetime.datetime.")
if not isinstance(freq,datetime.timedelta):
raise TypeError("The ``freq`` argument must be of type datetime.timedelta.")
from .. import extensions
return extensions.date_range(start_time,end_time,freq.total_seconds()) | [
"def",
"date_range",
"(",
"cls",
",",
"start_time",
",",
"end_time",
",",
"freq",
")",
":",
"if",
"not",
"isinstance",
"(",
"start_time",
",",
"datetime",
".",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"\"The ``start_time`` argument must be from type datetime.datetime.\"",
")",
"if",
"not",
"isinstance",
"(",
"end_time",
",",
"datetime",
".",
"datetime",
")",
":",
"raise",
"TypeError",
"(",
"\"The ``end_time`` argument must be from type datetime.datetime.\"",
")",
"if",
"not",
"isinstance",
"(",
"freq",
",",
"datetime",
".",
"timedelta",
")",
":",
"raise",
"TypeError",
"(",
"\"The ``freq`` argument must be from type datetime.timedelta.\"",
")",
"from",
".",
".",
"import",
"extensions",
"return",
"extensions",
".",
"date_range",
"(",
"start_time",
",",
"end_time",
",",
"freq",
".",
"total_seconds",
"(",
")",
")"
] | python | Returns a new SArray that represents a fixed frequency datetime index.
Parameters
----------
start_time : datetime.datetime
Left bound for generating dates.
end_time : datetime.datetime
Right bound for generating dates.
freq : datetime.timedelta
Fixed frequency between two consecutive data points.
Returns
-------
out : SArray
Examples
--------
>>> import datetime as dt
>>> start = dt.datetime(2013, 5, 7, 10, 4, 10)
>>> end = dt.datetime(2013, 5, 10, 10, 4, 10)
>>> sa = tc.SArray.date_range(start,end,dt.timedelta(1))
>>> print(sa)
dtype: datetime
Rows: 4
[datetime.datetime(2013, 5, 7, 10, 4, 10),
datetime.datetime(2013, 5, 8, 10, 4, 10),
datetime.datetime(2013, 5, 9, 10, 4, 10),
datetime.datetime(2013, 5, 10, 10, 4, 10)] | false |
2,334,998 | def runSearchRnaQuantificationSets(self, request):
"""
Returns a SearchRnaQuantificationSetsResponse for the specified
SearchRnaQuantificationSetsRequest object.
"""
return self.runSearchRequest(
request, protocol.SearchRnaQuantificationSetsRequest,
protocol.SearchRnaQuantificationSetsResponse,
self.rnaQuantificationSetsGenerator) | [
"def",
"runSearchRnaQuantificationSets",
"(",
"self",
",",
"request",
")",
":",
"return",
"self",
".",
"runSearchRequest",
"(",
"request",
",",
"protocol",
".",
"SearchRnaQuantificationSetsRequest",
",",
"protocol",
".",
"SearchRnaQuantificationSetsResponse",
",",
"self",
".",
"rnaQuantificationSetsGenerator",
")"
] | python | Returns a SearchRnaQuantificationSetsResponse for the specified
SearchRnaQuantificationSetsRequest object. | false |
2,670,858 | def get_edit_scripts(pron_a, pron_b, edit_costs=(1.0, 1.0, 1.0)):
"""Get the edit scripts to transform between two given pronunciations.
:param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme
:param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme
:param edit_costs: Costs of insert, replace and delete respectively
:return: List of edit scripts. Each edit script is represented as a list of operations,
where each operation is a dictionary.
"""
op_costs = {'insert': lambda x: edit_costs[0],
'match': lambda x, y: 0 if x == y else edit_costs[1],
'delete': lambda x: edit_costs[2]}
distance, scripts, costs, ops = edit_distance.best_transforms(pron_a, pron_b, op_costs=op_costs)
return [full_edit_script(script.to_primitive()) for script in scripts] | [
"def",
"get_edit_scripts",
"(",
"pron_a",
",",
"pron_b",
",",
"edit_costs",
"=",
"(",
"1.0",
",",
"1.0",
",",
"1.0",
")",
")",
":",
"op_costs",
"=",
"{",
"'insert'",
":",
"lambda",
"x",
":",
"edit_costs",
"[",
"0",
"]",
",",
"'match'",
":",
"lambda",
"x",
",",
"y",
":",
"0",
"if",
"x",
"==",
"y",
"else",
"edit_costs",
"[",
"1",
"]",
",",
"'delete'",
":",
"lambda",
"x",
":",
"edit_costs",
"[",
"2",
"]",
"}",
"distance",
",",
"scripts",
",",
"costs",
",",
"ops",
"=",
"edit_distance",
".",
"best_transforms",
"(",
"pron_a",
",",
"pron_b",
",",
"op_costs",
"=",
"op_costs",
")",
"return",
"[",
"full_edit_script",
"(",
"script",
".",
"to_primitive",
"(",
")",
")",
"for",
"script",
"in",
"scripts",
"]"
] | python | Get the edit scripts to transform between two given pronunciations.
:param pron_a: Source pronunciation as list of strings, each string corresponding to a phoneme
:param pron_b: Target pronunciation as list of strings, each string corresponding to a phoneme
:param edit_costs: Costs of insert, replace and delete respectively
:return: List of edit scripts. Each edit script is represented as a list of operations,
where each operation is a dictionary. | false |
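A short usage sketch for get_edit_scripts above; the phoneme strings are illustrative only, and the function relies on the edit_distance module and full_edit_script helper referenced in the row but not shown here.

# Hypothetical usage; assumes get_edit_scripts and its dependencies are importable.
pron_a = ['p', 'r', 'o', 'n']                      # source pronunciation
pron_b = ['p', 'r', 'a', 'n', 'z']                 # target pronunciation
scripts = get_edit_scripts(pron_a, pron_b, edit_costs=(1.0, 1.0, 1.0))
for op in scripts[0]:
    print(op)                                      # one dict per insert/match/delete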
1,614,924 | def textMerge(self, second):
"""Merge two text nodes into one """
if second is None: second__o = None
else: second__o = second._o
ret = libxml2mod.xmlTextMerge(self._o, second__o)
if ret is None:raise treeError('xmlTextMerge() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | [
"def",
"textMerge",
"(",
"self",
",",
"second",
")",
":",
"if",
"second",
"is",
"None",
":",
"second__o",
"=",
"None",
"else",
":",
"second__o",
"=",
"second",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlTextMerge",
"(",
"self",
".",
"_o",
",",
"second__o",
")",
"if",
"ret",
"is",
"None",
":",
"raise",
"treeError",
"(",
"'xmlTextMerge() failed'",
")",
"__tmp",
"=",
"xmlNode",
"(",
"_obj",
"=",
"ret",
")",
"return",
"__tmp"
] | python | Merge two text nodes into one | false |
1,898,448 | def get_account(self, account, use_sis_id=False, **kwargs):
"""
Retrieve information on an individual account.
:calls: `GET /api/v1/accounts/:id \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show>`_
:param account: The object or ID of the account to retrieve.
:type account: int, str or :class:`canvasapi.account.Account`
:param use_sis_id: Whether or not account_id is an SIS ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.account.Account`
"""
if use_sis_id:
account_id = account
uri_str = 'accounts/sis_account_id:{}'
else:
account_id = obj_or_id(account, "account", (Account,))
uri_str = 'accounts/{}'
response = self.__requester.request(
'GET',
uri_str.format(account_id),
_kwargs=combine_kwargs(**kwargs)
)
return Account(self.__requester, response.json()) | [
"def",
"get_account",
"(",
"self",
",",
"account",
",",
"use_sis_id",
"=",
"False",
",",
"**",
"kwargs",
")",
":",
"if",
"use_sis_id",
":",
"account_id",
"=",
"account",
"uri_str",
"=",
"'accounts/sis_account_id:{}'",
"else",
":",
"account_id",
"=",
"obj_or_id",
"(",
"account",
",",
"\"account\"",
",",
"(",
"Account",
",",
")",
")",
"uri_str",
"=",
"'accounts/{}'",
"response",
"=",
"self",
".",
"__requester",
".",
"request",
"(",
"'GET'",
",",
"uri_str",
".",
"format",
"(",
"account_id",
")",
",",
"_kwargs",
"=",
"combine_kwargs",
"(",
"**",
"kwargs",
")",
")",
"return",
"Account",
"(",
"self",
".",
"__requester",
",",
"response",
".",
"json",
"(",
")",
")"
] | python | Retrieve information on an individual account.
:calls: `GET /api/v1/accounts/:id \
<https://canvas.instructure.com/doc/api/accounts.html#method.accounts.show>`_
:param account: The object or ID of the account to retrieve.
:type account: int, str or :class:`canvasapi.account.Account`
:param use_sis_id: Whether or not account_id is an SIS ID.
Defaults to `False`.
:type use_sis_id: bool
:rtype: :class:`canvasapi.account.Account` | false |
1,734,209 | def predict_expectation(self, X):
"""
Compute the expected lifetime, E[T], using covariates X.
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame
If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the expected lifetimes for the individuals
"""
index = _get_index(X)
t = self._index
return pd.DataFrame(trapz(self.predict_survival_function(X)[index].values.T, t), index=index) | [
"def",
"predict_expectation",
"(",
"self",
",",
"X",
")",
":",
"index",
"=",
"_get_index",
"(",
"X",
")",
"t",
"=",
"self",
".",
"_index",
"return",
"pd",
".",
"DataFrame",
"(",
"trapz",
"(",
"self",
".",
"predict_survival_function",
"(",
"X",
")",
"[",
"index",
"]",
".",
"values",
".",
"T",
",",
"t",
")",
",",
"index",
"=",
"index",
")"
] | python | Compute the expected lifetime, E[T], using covariates X.
Parameters
----------
X: a (n,d) covariate numpy array or DataFrame
If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns the expected lifetimes for the individuals | false |
2,125,022 | def format(self, info_dict, delimiter='/'):
"""
This formatter will take a data structure that
represent a tree and will print all the paths
from the root to the leaves
in our case it will print each value and the keys
that are needed to get to it, for example:
vm0:
net: lago
memory: 1024
will be output as:
vm0/net/lago
vm0/memory/1024
Args:
info_dict (dict): information to reformat
delimiter (str): a delimiter for the path components
Returns:
str: String representing the formatted info
"""
def dfs(father, path, acc):
if isinstance(father, list):
for child in father:
dfs(child, path, acc)
elif isinstance(father, collections.Mapping):
for child in sorted(father.items(), key=itemgetter(0)):
dfs(child, path, acc)
elif isinstance(father, tuple):
path = copy.copy(path)
path.append(father[0])
dfs(father[1], path, acc)
else:
# join the last key with it's value
path[-1] = '{}: {}'.format(path[-1], str(father))
acc.append(delimiter.join(path))
result = []
dfs(info_dict.get('Prefix') or info_dict, [], result)
return '\n'.join(result) | [
"def",
"format",
"(",
"self",
",",
"info_dict",
",",
"delimiter",
"=",
"'/'",
")",
":",
"def",
"dfs",
"(",
"father",
",",
"path",
",",
"acc",
")",
":",
"if",
"isinstance",
"(",
"father",
",",
"list",
")",
":",
"for",
"child",
"in",
"father",
":",
"dfs",
"(",
"child",
",",
"path",
",",
"acc",
")",
"elif",
"isinstance",
"(",
"father",
",",
"collections",
".",
"Mapping",
")",
":",
"for",
"child",
"in",
"sorted",
"(",
"father",
".",
"items",
"(",
")",
",",
"key",
"=",
"itemgetter",
"(",
"0",
")",
")",
",",
":",
"dfs",
"(",
"child",
",",
"path",
",",
"acc",
")",
"elif",
"isinstance",
"(",
"father",
",",
"tuple",
")",
":",
"path",
"=",
"copy",
".",
"copy",
"(",
"path",
")",
"path",
".",
"append",
"(",
"father",
"[",
"0",
"]",
")",
"dfs",
"(",
"father",
"[",
"1",
"]",
",",
"path",
",",
"acc",
")",
"else",
":",
"path",
"[",
"-",
"1",
"]",
"=",
"'{}: {}'",
".",
"format",
"(",
"path",
"[",
"-",
"1",
"]",
",",
"str",
"(",
"father",
")",
")",
"acc",
".",
"append",
"(",
"delimiter",
".",
"join",
"(",
"path",
")",
")",
"result",
"=",
"[",
"]",
"dfs",
"(",
"info_dict",
".",
"get",
"(",
"'Prefix'",
")",
"or",
"info_dict",
",",
"[",
"]",
",",
"result",
")",
"return",
"'\\n'",
".",
"join",
"(",
"result",
")"
] | python | This formatter will take a data structure that
represents a tree and will print all the paths
from the root to the leaves
in our case it will print each value and the keys
that are needed to get to it, for example:
vm0:
net: lago
memory: 1024
will be output as:
vm0/net/lago
vm0/memory/1024
Args:
info_dict (dict): information to reformat
delimiter (str): a delimiter for the path components
Returns:
str: String representing the formatted info | false |
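A sketch of the formatter above on the docstring's own example; note the code joins the final key and its value with ': ', so the actual output shape is 'vm0/net: lago' rather than the literal 'vm0/net/lago' shown in the docstring. The `fmt` instance is an assumption.

info = {'vm0': {'net': 'lago', 'memory': 1024}}
print(fmt.format(info, delimiter='/'))
# vm0/memory: 1024
# vm0/net: lago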
2,512,800 | def db_snapshot_append(cls, cur, block_id, consensus_hash, ops_hash, timestamp):
"""
Append hash info for the last block processed, and the time at which it was done.
Meant to be executed as part of a transaction.
Return True on success
Raise an exception on invalid block number
Abort on db error
"""
query = 'INSERT INTO snapshots (block_id,consensus_hash,ops_hash,timestamp) VALUES (?,?,?,?);'
args = (block_id,consensus_hash,ops_hash,timestamp)
cls.db_query_execute(cur, query, args)
return True | [
"def",
"db_snapshot_append",
"(",
"cls",
",",
"cur",
",",
"block_id",
",",
"consensus_hash",
",",
"ops_hash",
",",
"timestamp",
")",
":",
"query",
"=",
"'INSERT INTO snapshots (block_id,consensus_hash,ops_hash,timestamp) VALUES (?,?,?,?);'",
"args",
"=",
"(",
"block_id",
",",
"consensus_hash",
",",
"ops_hash",
",",
"timestamp",
")",
"cls",
".",
"db_query_execute",
"(",
"cur",
",",
"query",
",",
"args",
")",
"return",
"True"
] | python | Append hash info for the last block processed, and the time at which it was done.
Meant to be executed as part of a transaction.
Return True on success
Raise an exception on invalid block number
Abort on db error | false |
2,269,066 | def _validateDirectives(self, directiveList, checkFileName):
if len(directiveList) == 0:
raise ParsingException("'{file}' does not contain any CHECK directives".format(file=checkFileName))
from . import Directives
"""
We should enforce for every CHECK-NOT and CHECK-NOT-L directive that the next directive (if it exists) is
a CHECK or CHECK-L directive
"""
last = len(directiveList) -1
supportedDirectives = [ Directives.Check, Directives.CheckLiteral ]
for (index,directive) in enumerate(directiveList):
if isA(directive, [Directives.CheckNot, Directives.CheckNotLiteral]):
if index < last:
after = directiveList[index +1]
if not isA(after, supportedDirectives):
requiredTypes = " or ".join( [ "CHECK{suffix}".format(suffix=d.directiveToken()) for d in supportedDirectives])
raise ParsingException("{directive} must have a {requiredTypes} directive after it instead of a {bad}".format(
directive=directive,
requiredTypes=requiredTypes,
check=Directives.Check.directiveToken(),
bad=after)
) | [
"def",
"_validateDirectives",
"(",
"self",
",",
"directiveList",
",",
"checkFileName",
")",
":",
"if",
"len",
"(",
"directiveList",
")",
"==",
"0",
":",
"raise",
"ParsingException",
"(",
"\"'{file}' does not contain any CHECK directives\"",
".",
"format",
"(",
"file",
"=",
"checkFileName",
")",
")",
"from",
".",
"import",
"Directives",
"last",
"=",
"len",
"(",
"directiveList",
")",
"-",
"1",
"supportedDirectives",
"=",
"[",
"Directives",
".",
"Check",
",",
"Directives",
".",
"CheckLiteral",
"]",
"for",
"(",
"index",
",",
"directive",
")",
"in",
"enumerate",
"(",
"directiveList",
")",
":",
"if",
"isA",
"(",
"directive",
",",
"[",
"Directives",
".",
"CheckNot",
",",
"Directives",
".",
"CheckNotLiteral",
"]",
")",
":",
"if",
"index",
"<",
"last",
":",
"after",
"=",
"directiveList",
"[",
"index",
"+",
"1",
"]",
"if",
"not",
"isA",
"(",
"after",
",",
"supportedDirectives",
")",
":",
"requiredTypes",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"\"CHECK{suffix}\"",
".",
"format",
"(",
"suffix",
"=",
"d",
".",
"directiveToken",
"(",
")",
")",
"for",
"d",
"in",
"supportedDirectives",
"]",
")",
"raise",
"ParsingException",
"(",
"\"{directive} must have a {requiredTypes} directive after it instead of a {bad}\"",
".",
"format",
"(",
"directive",
"=",
"directive",
",",
"requiredTypes",
"=",
"requiredTypes",
",",
"check",
"=",
"Directives",
".",
"Check",
".",
"directiveToken",
"(",
")",
",",
"bad",
"=",
"after",
")",
")"
] | python | We should enforce for every CHECK-NOT and CHECK-NOT-L directive that the next directive (if it exists) is
a CHECK or CHECK-L directive | false |
2,095,922 | def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""
Register a new url rule. Acts the same as :meth:`flask.Flask.add_url_rule`.
"""
self._defer(lambda app: app.add_url_rule(rule,
endpoint=endpoint,
view_func=view_func,
**options)) | [
"def",
"add_url_rule",
"(",
"self",
",",
"rule",
",",
"endpoint",
"=",
"None",
",",
"view_func",
"=",
"None",
",",
"**",
"options",
")",
":",
"self",
".",
"_defer",
"(",
"lambda",
"app",
":",
"app",
".",
"add_url_rule",
"(",
"rule",
",",
"endpoint",
"=",
"endpoint",
",",
"view_func",
"=",
"view_func",
",",
"**",
"options",
")",
")"
] | python | Register a new url rule. Acts the same as :meth:`flask.Flask.add_url_rule`. | false |
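A sketch of the deferred registration above; the rule is only applied once the owning extension is bound to a Flask app, and the `ext.init_app(app)` replay step is an assumption about the surrounding class.

from flask import Flask

def hello():
    return 'hello'

ext.add_url_rule('/hello', endpoint='hello', view_func=hello)  # queued, not applied yet
app = Flask(__name__)
ext.init_app(app)   # assumed to run the deferred lambdas against `app`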
1,595,684 | def format_string(self, s, args, kwargs):
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
if isinstance(s, Markup):
formatter = SandboxedEscapeFormatter(self, s.escape)
else:
formatter = SandboxedFormatter(self)
kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv) | [
"def",
"format_string",
"(",
"self",
",",
"s",
",",
"args",
",",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"s",
",",
"Markup",
")",
":",
"formatter",
"=",
"SandboxedEscapeFormatter",
"(",
"self",
",",
"s",
".",
"escape",
")",
"else",
":",
"formatter",
"=",
"SandboxedFormatter",
"(",
"self",
")",
"kwargs",
"=",
"_MagicFormatMapping",
"(",
"args",
",",
"kwargs",
")",
"rv",
"=",
"formatter",
".",
"vformat",
"(",
"s",
",",
"args",
",",
"kwargs",
")",
"return",
"type",
"(",
"s",
")",
"(",
"rv",
")"
] | python | If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it. | false |
1,844,170 | def to_networkx(self):
"""Return a NetworkX Graph object representing the minimum spanning tree.
Edge weights in the graph are the distance between the nodes they connect.
Nodes have a `data` attribute attached giving the data vector of the
associated point.
"""
try:
from networkx import Graph, set_node_attributes
except ImportError:
raise ImportError('You must have networkx installed to export networkx graphs')
result = Graph()
for row in self._mst:
result.add_edge(row[0], row[1], weight=row[2])
data_dict = {index: tuple(row) for index, row in enumerate(self._data)}
set_node_attributes(result, data_dict, 'data')
return result | [
"def",
"to_networkx",
"(",
"self",
")",
":",
"try",
":",
"from",
"networkx",
"import",
"Graph",
",",
"set_node_attributes",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'You must have networkx installed to export networkx graphs'",
")",
"result",
"=",
"Graph",
"(",
")",
"for",
"row",
"in",
"self",
".",
"_mst",
":",
"result",
".",
"add_edge",
"(",
"row",
"[",
"0",
"]",
",",
"row",
"[",
"1",
"]",
",",
"weight",
"=",
"row",
"[",
"2",
"]",
")",
"data_dict",
"=",
"{",
"index",
":",
"tuple",
"(",
"row",
")",
"for",
"index",
",",
"row",
"in",
"enumerate",
"(",
"self",
".",
"_data",
")",
"}",
"set_node_attributes",
"(",
"result",
",",
"data_dict",
",",
"'data'",
")",
"return",
"result"
] | python | Return a NetworkX Graph object representing the minimum spanning tree.
Edge weights in the graph are the distance between the nodes they connect.
Nodes have a `data` attribute attached giving the data vector of the
associated point. | false |
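A usage sketch for to_networkx above, assuming `mst` is an instance of the surrounding minimum-spanning-tree class (e.g. from hdbscan) and that networkx is installed.

g = mst.to_networkx()
print(g.number_of_nodes(), g.number_of_edges())
for u, v, w in g.edges(data='weight'):
    print(u, v, w)          # weight is the distance between the two points
print(g.nodes[0]['data'])   # the original data vector attached to node 0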
2,472,976 | def timer(fun, *a, **k):
""" define a timer for a rule function
for log and statistic purposes """
@wraps(fun)
def timer(*a, **k):
start = arrow.now()
ret = fun(*a, **k)
end = arrow.now()
print('timer:fun: %s\n start:%s,end:%s, took [%s]' % (
str(fun), str(start), str(end), str(end - start)))
return ret
return timer | [
"def",
"timer",
"(",
"fun",
",",
"*",
"a",
",",
"**",
"k",
")",
":",
"@",
"wraps",
"(",
"fun",
")",
"def",
"timer",
"(",
"*",
"a",
",",
"**",
"k",
")",
":",
"start",
"=",
"arrow",
".",
"now",
"(",
")",
"ret",
"=",
"fun",
"(",
"*",
"a",
",",
"**",
"k",
")",
"end",
"=",
"arrow",
".",
"now",
"(",
")",
"print",
"(",
"'timer:fun: %s\\n start:%s,end:%s, took [%s]'",
"%",
"(",
"str",
"(",
"fun",
")",
",",
"str",
"(",
"start",
")",
",",
"str",
"(",
"end",
")",
",",
"str",
"(",
"end",
"-",
"start",
")",
")",
")",
"return",
"ret",
"return",
"timer"
] | python | define a timer for a rule function
for logging and statistics purposes | false |
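Applied as a decorator, the wrapper above prints the start and end timestamps and the elapsed time around every call; a minimal sketch (requires the arrow package used by the snippet).

@timer
def busy(n):
    return sum(range(n))

busy(10 ** 6)   # prints "timer:fun: ... start:..., end:..., took [...]"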
2,496,793 | def __output_see(self, see):
""" Convert the argument to a @see tag to rest """
if see.startswith('<a href'):
# HTML link -- <a href="...">...</a>
return self.__html_to_rst(see)
elif '"' in see:
# Plain text
return see
else:
# Type reference (default)
return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),) | [
"def",
"__output_see",
"(",
"self",
",",
"see",
")",
":",
"if",
"see",
".",
"startswith",
"(",
"'<a href'",
")",
":",
"return",
"self",
".",
"__html_to_rst",
"(",
"see",
")",
"elif",
"'\"'",
"in",
"see",
":",
"return",
"see",
"else",
":",
"return",
"':java:ref:`%s`'",
"%",
"(",
"see",
".",
"replace",
"(",
"'#'",
",",
"'.'",
")",
".",
"replace",
"(",
"' '",
",",
"''",
")",
",",
")"
] | python | Convert the argument to a @see tag to rest | false |
2,323,709 | def symmetric_difference(self, other):
"""
Combine with another Region by performing the symmetric difference of their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined.
"""
# work only on the lowest level
# TODO: Allow this to be done for regions with different depths.
if not (self.maxdepth == other.maxdepth): raise AssertionError("Regions must have the same maxdepth")
self._demote_all()
opd = set(other.get_demoted())
self.pixeldict[self.maxdepth].symmetric_difference_update(opd)
self._renorm()
return | [
"def",
"symmetric_difference",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"(",
"self",
".",
"maxdepth",
"==",
"other",
".",
"maxdepth",
")",
":",
"raise",
"AssertionError",
"(",
"\"Regions must have the same maxdepth\"",
")",
"self",
".",
"_demote_all",
"(",
")",
"opd",
"=",
"set",
"(",
"other",
".",
"get_demoted",
"(",
")",
")",
"self",
".",
"pixeldict",
"[",
"self",
".",
"maxdepth",
"]",
".",
"symmetric_difference_update",
"(",
"opd",
")",
"self",
".",
"_renorm",
"(",
")",
"return"
] | python | Combine with another Region by performing the symmetric difference of their pixlists.
Requires both regions to have the same maxdepth.
Parameters
----------
other : :class:`AegeanTools.regions.Region`
The region to be combined. | false |
1,778,162 | def transceive(self, data, timeout=None):
"""Transmit arbitrary data and receive the response.
This is a low level method to send arbitrary data to the
tag. While it should almost always be better to use
:meth:`send_apdu` this is the only way to force a specific
timeout value (which is otherwise derived from the Tag's
answer to select). The *timeout* value is expected as a float
specifying the seconds to wait.
"""
log.debug(">> {0}".format(hexlify(data)))
data = self._dep.exchange(data, timeout)
log.debug("<< {0}".format(hexlify(data) if data else "None"))
return data | [
"def",
"transceive",
"(",
"self",
",",
"data",
",",
"timeout",
"=",
"None",
")",
":",
"log",
".",
"debug",
"(",
"\">> {0}\"",
".",
"format",
"(",
"hexlify",
"(",
"data",
")",
")",
")",
"data",
"=",
"self",
".",
"_dep",
".",
"exchange",
"(",
"data",
",",
"timeout",
")",
"log",
".",
"debug",
"(",
"\"<< {0}\"",
".",
"format",
"(",
"hexlify",
"(",
"data",
")",
"if",
"data",
"else",
"\"None\"",
")",
")",
"return",
"data"
] | python | Transmit arbitrary data and receive the response.
This is a low level method to send arbitrary data to the
tag. While it should almost always be better to use
:meth:`send_apdu` this is the only way to force a specific
timeout value (which is otherwise derived from the Tag's
answer to select). The *timeout* value is expected as a float
specifying the seconds to wait. | false |
1,789,788 | def _sort_itemstrs(items, itemstrs):
"""
Equivalent to `sorted(items)` except if `items` are unorderable, then
string values are used to define an ordering.
"""
# First try to sort items by their normal values
# If that doesn't work, then sort by their string values
import ubelt as ub
try:
# Set ordering is not unique. Sort by string values instead.
if _peek_isinstance(items, (set, frozenset)):
raise TypeError
sortx = ub.argsort(items)
except TypeError:
sortx = ub.argsort(itemstrs)
itemstrs = [itemstrs[x] for x in sortx]
return itemstrs | [
"def",
"_sort_itemstrs",
"(",
"items",
",",
"itemstrs",
")",
":",
"import",
"ubelt",
"as",
"ub",
"try",
":",
"if",
"_peek_isinstance",
"(",
"items",
",",
"(",
"set",
",",
"frozenset",
")",
")",
":",
"raise",
"TypeError",
"sortx",
"=",
"ub",
".",
"argsort",
"(",
"items",
")",
"except",
"TypeError",
":",
"sortx",
"=",
"ub",
".",
"argsort",
"(",
"itemstrs",
")",
"itemstrs",
"=",
"[",
"itemstrs",
"[",
"x",
"]",
"for",
"x",
"in",
"sortx",
"]",
"return",
"itemstrs"
] | python | Equivalent to `sorted(items)` except if `items` are unorderable, then
string values are used to define an ordering. | false |
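A sketch of the fallback behaviour above: mixed types raise TypeError under Python 3 ordering, so the helper orders by the rendered strings instead.

items = [3, 'a', None]                     # unorderable mix in Python 3
itemstrs = [repr(x) for x in items]
print(_sort_itemstrs(items, itemstrs))     # ["'a'", '3', 'None'] (string order)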
1,923,129 | def UNIFAC_groups(self):
r'''Dictionary of UNIFAC subgroup: count groups for the original
UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> pprint(Chemical('Cumene').UNIFAC_groups)
{1: 2, 9: 5, 13: 1}
'''
if self.__UNIFAC_groups:
return self.__UNIFAC_groups
else:
load_group_assignments_DDBST()
if self.InChI_Key in DDBST_UNIFAC_assignments:
self.__UNIFAC_groups = DDBST_UNIFAC_assignments[self.InChI_Key]
return self.__UNIFAC_groups
else:
return None | [
"def",
"UNIFAC_groups",
"(",
"self",
")",
":",
"if",
"self",
".",
"__UNIFAC_groups",
":",
"return",
"self",
".",
"__UNIFAC_groups",
"else",
":",
"load_group_assignments_DDBST",
"(",
")",
"if",
"self",
".",
"InChI_Key",
"in",
"DDBST_UNIFAC_assignments",
":",
"self",
".",
"__UNIFAC_groups",
"=",
"DDBST_UNIFAC_assignments",
"[",
"self",
".",
"InChI_Key",
"]",
"return",
"self",
".",
"__UNIFAC_groups",
"else",
":",
"return",
"None"
] | python | r'''Dictionary of UNIFAC subgroup: count groups for the original
UNIFAC subgroups, as determined by `DDBST's online service <http://www.ddbst.com/unifacga.html>`_.
Examples
--------
>>> pprint(Chemical('Cumene').UNIFAC_groups)
{1: 2, 9: 5, 13: 1} | false |
1,868,286 | def scheduleServices(self, jobGraph):
"""
Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule.
"""
# Add jobGraph to set being processed by the service manager
self.jobGraphsWithServicesBeingStarted.add(jobGraph)
# Add number of jobs managed by ServiceManager
self.jobsIssuedToServiceManager += sum(map(len, jobGraph.services)) + 1 # The plus one accounts for the root job
# Asynchronously schedule the services
self._jobGraphsWithServicesToStart.put(jobGraph) | [
"def",
"scheduleServices",
"(",
"self",
",",
"jobGraph",
")",
":",
"self",
".",
"jobGraphsWithServicesBeingStarted",
".",
"add",
"(",
"jobGraph",
")",
"self",
".",
"jobsIssuedToServiceManager",
"+=",
"sum",
"(",
"map",
"(",
"len",
",",
"jobGraph",
".",
"services",
")",
")",
"+",
"1",
"self",
".",
"_jobGraphsWithServicesToStart",
".",
"put",
"(",
"jobGraph",
")"
] | python | Schedule the services of a job asynchronously.
When the job's services are running the jobGraph for the job will
be returned by toil.leader.ServiceManager.getJobGraphsWhoseServicesAreRunning.
:param toil.jobGraph.JobGraph jobGraph: wrapper of job with services to schedule. | false |
2,061,160 | def _build_variant_label(
build, chromosome, position, reference_allele, variant_allele,
gene_symbols=None
):
"""
Function to build HGVS variant labels
:param build: {str} build id
:param chromosome: {str} chromosome
:param position: {str} variation position as string or int
:param reference_allele: {str} single letter ref bp
:param variant_allele: {str} single letter bp change
:param gene_symbols: {list} gene symbols (hgvs)
:return: {str} variant label
"""
variant_label = ''
prefix = ''
if gene_symbols and len(gene_symbols) == 1 and gene_symbols[0]:
prefix = "{0}{1}({2})".format(build, chromosome, gene_symbols[0])
else:
prefix = "{0}{1}".format(build, chromosome)
if reference_allele == '-':
variant_label = "{0}:g.{1}ins{2}".format(prefix, position, variant_allele)
elif variant_allele == '-':
variant_label = "{0}:g.{1}del{2}".format(
prefix, position, reference_allele)
else:
variant_label = "{0}:g.{1}{2}>{3}".format(
prefix, position, reference_allele, variant_allele)
return variant_label | [
"def",
"_build_variant_label",
"(",
"build",
",",
"chromosome",
",",
"position",
",",
"reference_allele",
",",
"variant_allele",
",",
"gene_symbols",
"=",
"None",
")",
":",
"variant_label",
"=",
"''",
"prefix",
"=",
"''",
"if",
"gene_symbols",
"and",
"len",
"(",
"gene_symbols",
")",
"==",
"1",
"and",
"gene_symbols",
"[",
"0",
"]",
":",
"prefix",
"=",
"\"{0}{1}({2})\"",
".",
"format",
"(",
"build",
",",
"chromosome",
",",
"gene_symbols",
"[",
"0",
"]",
")",
"else",
":",
"prefix",
"=",
"\"{0}{1}\"",
".",
"format",
"(",
"build",
",",
"chromosome",
")",
"if",
"reference_allele",
"==",
"'-'",
":",
"variant_label",
"=",
"\"{0}:g.{1}ins{2}\"",
".",
"format",
"(",
"prefix",
",",
"position",
",",
"variant_allele",
")",
"elif",
"variant_allele",
"==",
"'-'",
":",
"variant_label",
"=",
"\"{0}:g.{1}del{2}\"",
".",
"format",
"(",
"prefix",
",",
"position",
",",
"reference_allele",
")",
"else",
":",
"variant_label",
"=",
"\"{0}:g.{1}{2}>{3}\"",
".",
"format",
"(",
"prefix",
",",
"position",
",",
"reference_allele",
",",
"variant_allele",
")",
"return",
"variant_label"
] | python | Function to build HGVS variant labels
:param build: {str} build id
:param chromosome: {str} chromosome
:param position: {str} variation position as string or int
:param reference_allele: {str} single letter ref bp
:param variant_allele: {str} single letter bp change
:param gene_symbols: {list} gene symbols (hgvs)
:return: {str} variant label | false |
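A sketch of the three label shapes the helper above produces; the build and chromosome strings are illustrative and are concatenated verbatim into the prefix.

print(_build_variant_label('hg19', 'chr17', 41234470, 'A', 'G'))
# hg19chr17:g.41234470A>G                        (substitution)
print(_build_variant_label('hg19', 'chr17', 41234470, '-', 'G'))
# hg19chr17:g.41234470insG                       (insertion)
print(_build_variant_label('hg19', 'chr17', 41234470, 'A', '-', gene_symbols=['BRCA1']))
# hg19chr17(BRCA1):g.41234470delA                (deletion, with gene symbol)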
2,698,453 | def hub_history(self):
"""Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time.
"""
self.session.send(self._query_socket, "history_request", content={})
idents, msg = self.session.recv(self._query_socket, 0)
if self.debug:
pprint(msg)
content = msg['content']
if content['status'] != 'ok':
raise self._unwrap_exception(content)
else:
return content['history'] | [
"def",
"hub_history",
"(",
"self",
")",
":",
"self",
".",
"session",
".",
"send",
"(",
"self",
".",
"_query_socket",
",",
"\"history_request\"",
",",
"content",
"=",
"{",
"}",
")",
"idents",
",",
"msg",
"=",
"self",
".",
"session",
".",
"recv",
"(",
"self",
".",
"_query_socket",
",",
"0",
")",
"if",
"self",
".",
"debug",
":",
"pprint",
"(",
"msg",
")",
"content",
"=",
"msg",
"[",
"'content'",
"]",
"if",
"content",
"[",
"'status'",
"]",
"!=",
"'ok'",
":",
"raise",
"self",
".",
"_unwrap_exception",
"(",
"content",
")",
"else",
":",
"return",
"content",
"[",
"'history'",
"]"
] | python | Get the Hub's history
Just like the Client, the Hub has a history, which is a list of msg_ids.
This will contain the history of all clients, and, depending on configuration,
may contain history across multiple cluster sessions.
Any msg_id returned here is a valid argument to `get_result`.
Returns
-------
msg_ids : list of strs
list of all msg_ids, ordered by task submission time. | false |
1,712,519 | def LT(classical_reg1, classical_reg2, classical_reg3):
"""
Produce an LT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalLessThan instance.
"""
classical_reg1, classical_reg2, classical_reg3 = prepare_ternary_operands(classical_reg1,
classical_reg2,
classical_reg3)
return ClassicalLessThan(classical_reg1, classical_reg2, classical_reg3) | [
"def",
"LT",
"(",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
")",
":",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
"=",
"prepare_ternary_operands",
"(",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
")",
"return",
"ClassicalLessThan",
"(",
"classical_reg1",
",",
"classical_reg2",
",",
"classical_reg3",
")"
] | python | Produce an LT instruction.
:param classical_reg1: Memory address to which to store the comparison result.
:param classical_reg2: Left comparison operand.
:param classical_reg3: Right comparison operand.
:return: A ClassicalLessThan instance. | false |
1,935,861 | def diff_commonPrefix(self, text1, text2):
"""Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid | [
"def",
"diff_commonPrefix",
"(",
"self",
",",
"text1",
",",
"text2",
")",
":",
"if",
"not",
"text1",
"or",
"not",
"text2",
"or",
"text1",
"[",
"0",
"]",
"!=",
"text2",
"[",
"0",
"]",
":",
"return",
"0",
"pointermin",
"=",
"0",
"pointermax",
"=",
"min",
"(",
"len",
"(",
"text1",
")",
",",
"len",
"(",
"text2",
")",
")",
"pointermid",
"=",
"pointermax",
"pointerstart",
"=",
"0",
"while",
"pointermin",
"<",
"pointermid",
":",
"if",
"text1",
"[",
"pointerstart",
":",
"pointermid",
"]",
"==",
"text2",
"[",
"pointerstart",
":",
"pointermid",
"]",
":",
"pointermin",
"=",
"pointermid",
"pointerstart",
"=",
"pointermin",
"else",
":",
"pointermax",
"=",
"pointermid",
"pointermid",
"=",
"(",
"pointermax",
"-",
"pointermin",
")",
"//",
"2",
"+",
"pointermin",
"return",
"pointermid"
] | python | Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string. | false |
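A short sketch of the method above, which is the common-prefix primitive from the diff-match-patch library; the class name follows that library.

dmp = diff_match_patch()
print(dmp.diff_commonPrefix('1234abcdef', '1234xyz'))   # 4
print(dmp.diff_commonPrefix('abc', 'xyz'))              # 0 (fast null-case path)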
2,216,466 | def download_file(request):
'''Create and download a zip file containing the media file.'''
if request.method == "GET":
if path.exists(settings.MEDIA_ROOT):
zipfile_name = 'media_%s.zip' % settings.SITE_NAME
in_memory_file = BytesIO()
media_zipfile = zipfile.ZipFile(in_memory_file, 'w',
zipfile.ZIP_DEFLATED)
directory_name = path.split(settings.MEDIA_ROOT)[-1]
for root, dirs, files in walk(directory_name):
for file in files:
media_zipfile.write(path.join(root, file))
media_zipfile.close()
resp = HttpResponse(in_memory_file.getvalue(),
content_type="application/x-zip-compressed")
resp['Content-Disposition'] = (
'attachment; filename=%s' % zipfile_name)
else:
resp = render(request,
'django_admin/transfer_media_message.html',
{'error_message':
'media file does not exist'})
else:
resp = HttpResponseNotAllowed(permitted_methods=['GET'])
return resp | [
"def",
"download_file",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"\"GET\"",
":",
"if",
"path",
".",
"exists",
"(",
"settings",
".",
"MEDIA_ROOT",
")",
":",
"zipfile_name",
"=",
"'media_%s.zip'",
"%",
"settings",
".",
"SITE_NAME",
"in_memory_file",
"=",
"BytesIO",
"(",
")",
"media_zipfile",
"=",
"zipfile",
".",
"ZipFile",
"(",
"in_memory_file",
",",
"'w'",
",",
"zipfile",
".",
"ZIP_DEFLATED",
")",
"directory_name",
"=",
"path",
".",
"split",
"(",
"settings",
".",
"MEDIA_ROOT",
")",
"[",
"-",
"1",
"]",
"for",
"root",
",",
"dirs",
",",
"files",
"in",
"walk",
"(",
"directory_name",
")",
":",
"for",
"file",
"in",
"files",
":",
"media_zipfile",
".",
"write",
"(",
"path",
".",
"join",
"(",
"root",
",",
"file",
")",
")",
"media_zipfile",
".",
"close",
"(",
")",
"resp",
"=",
"HttpResponse",
"(",
"in_memory_file",
".",
"getvalue",
"(",
")",
",",
"content_type",
"=",
"\"application/x-zip-compressed\"",
")",
"resp",
"[",
"'Content-Disposition'",
"]",
"=",
"(",
"'attachment; filename=%s'",
"%",
"zipfile_name",
")",
"else",
":",
"resp",
"=",
"render",
"(",
"request",
",",
"'django_admin/transfer_media_message.html'",
",",
"{",
"'error_message'",
":",
"'media file does not exist'",
"}",
")",
"else",
":",
"resp",
"=",
"HttpResponseNotAllowed",
"(",
"permitted_methods",
"=",
"[",
"'GET'",
"]",
")",
"return",
"resp"
] | python | Create and download a zip file containing the media file. | false |
2,084,827 | def least_squares_effective_mass( cartesian_k_points, eigenvalues ):
"""
Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised.
"""
if not points_are_in_a_straight_line( cartesian_k_points ):
raise ValueError( 'k-points are not collinear' )
dk = cartesian_k_points - cartesian_k_points[0]
mod_dk = np.linalg.norm( dk, axis = 1 )
delta_e = eigenvalues - eigenvalues[0]
effective_mass = 1.0 / ( np.polyfit( mod_dk, eigenvalues, 2 )[0] * ev_to_hartree * 2.0 )
return effective_mass | [
"def",
"least_squares_effective_mass",
"(",
"cartesian_k_points",
",",
"eigenvalues",
")",
":",
"if",
"not",
"points_are_in_a_straight_line",
"(",
"cartesian_k_points",
")",
":",
"raise",
"ValueError",
"(",
"'k-points are not collinear'",
")",
"dk",
"=",
"cartesian_k_points",
"-",
"cartesian_k_points",
"[",
"0",
"]",
"mod_dk",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"dk",
",",
"axis",
"=",
"1",
")",
"delta_e",
"=",
"eigenvalues",
"-",
"eigenvalues",
"[",
"0",
"]",
"effective_mass",
"=",
"1.0",
"/",
"(",
"np",
".",
"polyfit",
"(",
"mod_dk",
",",
"eigenvalues",
",",
"2",
")",
"[",
"0",
"]",
"*",
"ev_to_hartree",
"*",
"2.0",
")",
"return",
"effective_mass"
] | python | Calculate the effective mass using a least squares quadratic fit.
Args:
cartesian_k_points (np.array): Cartesian reciprocal coordinates for the k-points
eigenvalues (np.array): Energy eigenvalues at each k-point to be used in the fit.
Returns:
(float): The fitted effective mass
Notes:
If the k-points do not sit on a straight line a ValueError will be raised. | false |
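A worked sketch of the fit above on a synthetic parabolic band; note the function fits `eigenvalues` directly (the `delta_e` variable is computed but unused, which leaves the result unchanged since a constant offset does not affect the quadratic coefficient). `ev_to_hartree` and `points_are_in_a_straight_line` are module-level helpers not shown in the row.

import numpy as np
k = np.array([[0.0, 0, 0], [0.1, 0, 0], [0.2, 0, 0], [0.3, 0, 0]])  # collinear
e = 2.0 * np.linalg.norm(k, axis=1) ** 2     # E = a|k|^2 with a = 2.0 (eV assumed)
m = least_squares_effective_mass(k, e)       # 1 / (a * ev_to_hartree * 2)
print(m)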
1,991,397 | def default(self):
"""Returns the static value that this defaults to."""
if self.MUTABLE:
return copy.deepcopy(self._default)
else:
return self._default | [
"def",
"default",
"(",
"self",
")",
":",
"if",
"self",
".",
"MUTABLE",
":",
"return",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"_default",
")",
"else",
":",
"return",
"self",
".",
"_default"
] | python | Returns the static value that this defaults to. | false |
1,962,515 | def get(self, *, kind: Type=None, tag: Hashable=None, **kwargs) -> Iterator:
"""
Get an iterator of GameObjects by kind or tag.
kind: Any type. Pass to get a subset of contained GameObjects with the
given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects
with the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
scene.get(kind=MyGameObject)
scene.get(tag="red")
scene.get(kind=MyGameObject, tag="red")
"""
return self.game_objects.get(kind=kind, tag=tag, **kwargs) | [
"def",
"get",
"(",
"self",
",",
"*",
",",
"kind",
":",
"Type",
"=",
"None",
",",
"tag",
":",
"Hashable",
"=",
"None",
",",
"**",
"kwargs",
")",
"->",
"Iterator",
":",
"return",
"self",
".",
"game_objects",
".",
"get",
"(",
"kind",
"=",
"kind",
",",
"tag",
"=",
"tag",
",",
"**",
"kwargs",
")"
] | python | Get an iterator of GameObjects by kind or tag.
kind: Any type. Pass to get a subset of contained GameObjects with the
given type.
tag: Any Hashable object. Pass to get a subset of contained GameObjects
with the given tag.
Pass both kind and tag to get objects that are both that type and that
tag.
Examples:
scene.get(kind=MyGameObject)
scene.get(tag="red")
scene.get(kind=MyGameObject, tag="red") | false |
2,114,432 | def grantham_score(ref_aa, mut_aa):
"""https://github.com/ashutoshkpandey/Annotation/blob/master/Grantham_score_calculator.py"""
grantham = {
'S': {'R': 110, 'L': 145, 'P': 74, 'T': 58, 'A': 99, 'V': 124, 'G': 56, 'I': 142, 'F': 155, 'Y': 144, 'C': 112,
'H': 89, 'Q': 68, 'N': 46, 'K': 121, 'D': 65, 'E': 80, 'M': 135, 'W': 177},
'R': {'R': 0, 'L': 102, 'P': 103, 'T': 71, 'A': 112, 'V': 96, 'G': 125, 'I': 97, 'F': 97, 'Y': 77, 'C': 180,
'H': 29, 'Q': 43, 'N': 86, 'K': 26, 'D': 96, 'E': 54, 'M': 91, 'W': 101, 'S': 0},
'L': {'R': 0, 'L': 0, 'P': 98, 'T': 92, 'A': 96, 'V': 32, 'G': 138, 'I': 5, 'F': 22, 'Y': 36, 'C': 198, 'H': 99,
'Q': 113, 'N': 153, 'K': 107, 'D': 172, 'E': 138, 'M': 15, 'W': 61, 'S': 0},
'P': {'R': 0, 'L': 0, 'P': 0, 'T': 38, 'A': 27, 'V': 68, 'G': 42, 'I': 95, 'F': 114, 'Y': 110, 'C': 169,
'H': 77, 'Q': 76, 'N': 91, 'K': 103, 'D': 108, 'E': 93, 'M': 87, 'W': 147, 'S': 0},
'T': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 58, 'V': 69, 'G': 59, 'I': 89, 'F': 103, 'Y': 92, 'C': 149, 'H': 47,
'Q': 42, 'N': 65, 'K': 78, 'D': 85, 'E': 65, 'M': 81, 'W': 128, 'S': 0},
'A': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 64, 'G': 60, 'I': 94, 'F': 113, 'Y': 112, 'C': 195, 'H': 86,
'Q': 91, 'N': 111, 'K': 106, 'D': 126, 'E': 107, 'M': 84, 'W': 148, 'S': 0},
'V': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 109, 'I': 29, 'F': 50, 'Y': 55, 'C': 192, 'H': 84,
'Q': 96, 'N': 133, 'K': 97, 'D': 152, 'E': 121, 'M': 21, 'W': 88, 'S': 0},
'G': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 135, 'F': 153, 'Y': 147, 'C': 159, 'H': 98,
'Q': 87, 'N': 80, 'K': 127, 'D': 94, 'E': 98, 'M': 127, 'W': 184, 'S': 0},
'I': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 21, 'Y': 33, 'C': 198, 'H': 94,
'Q': 109, 'N': 149, 'K': 102, 'D': 168, 'E': 134, 'M': 10, 'W': 61, 'S': 0},
'F': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 22, 'C': 205, 'H': 100,
'Q': 116, 'N': 158, 'K': 102, 'D': 177, 'E': 140, 'M': 28, 'W': 40, 'S': 0},
'Y': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 194, 'H': 83,
'Q': 99, 'N': 143, 'K': 85, 'D': 160, 'E': 122, 'M': 36, 'W': 37, 'S': 0},
'C': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 174,
'Q': 154, 'N': 139, 'K': 202, 'D': 154, 'E': 170, 'M': 196, 'W': 215, 'S': 0},
'H': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 24,
'N': 68, 'K': 32, 'D': 81, 'E': 40, 'M': 87, 'W': 115, 'S': 0},
'Q': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 46, 'K': 53, 'D': 61, 'E': 29, 'M': 101, 'W': 130, 'S': 0},
'N': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 0, 'K': 94, 'D': 23, 'E': 42, 'M': 142, 'W': 174, 'S': 0},
'K': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 0, 'K': 0, 'D': 101, 'E': 56, 'M': 95, 'W': 110, 'S': 0},
'D': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 0, 'K': 0, 'D': 0, 'E': 45, 'M': 160, 'W': 181, 'S': 0},
'E': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 0, 'K': 0, 'D': 0, 'E': 0, 'M': 126, 'W': 152, 'S': 0},
'M': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 0, 'K': 0, 'D': 0, 'E': 0, 'M': 0, 'W': 67, 'S': 0},
'W': {'R': 0, 'L': 0, 'P': 0, 'T': 0, 'A': 0, 'V': 0, 'G': 0, 'I': 0, 'F': 0, 'Y': 0, 'C': 0, 'H': 0, 'Q': 0,
'N': 0, 'K': 0, 'D': 0, 'E': 0, 'M': 0, 'W': 0, 'S': 0}}
score = 0
if ref_aa not in grantham or mut_aa not in grantham:
log.error('{} to {}: a residue is not in the Grantham matrix'.format(ref_aa, mut_aa))
return score, 'Unknown'
if ref_aa == mut_aa:
return score, 'Conservative'
else:
if int(grantham[ref_aa][mut_aa]) != 0:
score += int(grantham[ref_aa][mut_aa])
else:
score += int(grantham[mut_aa][ref_aa])
if score > 150:
return score, "Radical"
elif 150 >= score > 100:
return score, "Moderately Radical"
elif 100 >= score > 50:
return score, "Moderately Conservative"
else:
return score, "Conservative" | [
"def",
"grantham_score",
"(",
"ref_aa",
",",
"mut_aa",
")",
":",
"grantham",
"=",
"{",
"'S'",
":",
"{",
"'R'",
":",
"110",
",",
"'L'",
":",
"145",
",",
"'P'",
":",
"74",
",",
"'T'",
":",
"58",
",",
"'A'",
":",
"99",
",",
"'V'",
":",
"124",
",",
"'G'",
":",
"56",
",",
"'I'",
":",
"142",
",",
"'F'",
":",
"155",
",",
"'Y'",
":",
"144",
",",
"'C'",
":",
"112",
",",
"'H'",
":",
"89",
",",
"'Q'",
":",
"68",
",",
"'N'",
":",
"46",
",",
"'K'",
":",
"121",
",",
"'D'",
":",
"65",
",",
"'E'",
":",
"80",
",",
"'M'",
":",
"135",
",",
"'W'",
":",
"177",
"}",
",",
"'R'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"102",
",",
"'P'",
":",
"103",
",",
"'T'",
":",
"71",
",",
"'A'",
":",
"112",
",",
"'V'",
":",
"96",
",",
"'G'",
":",
"125",
",",
"'I'",
":",
"97",
",",
"'F'",
":",
"97",
",",
"'Y'",
":",
"77",
",",
"'C'",
":",
"180",
",",
"'H'",
":",
"29",
",",
"'Q'",
":",
"43",
",",
"'N'",
":",
"86",
",",
"'K'",
":",
"26",
",",
"'D'",
":",
"96",
",",
"'E'",
":",
"54",
",",
"'M'",
":",
"91",
",",
"'W'",
":",
"101",
",",
"'S'",
":",
"0",
"}",
",",
"'L'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"98",
",",
"'T'",
":",
"92",
",",
"'A'",
":",
"96",
",",
"'V'",
":",
"32",
",",
"'G'",
":",
"138",
",",
"'I'",
":",
"5",
",",
"'F'",
":",
"22",
",",
"'Y'",
":",
"36",
",",
"'C'",
":",
"198",
",",
"'H'",
":",
"99",
",",
"'Q'",
":",
"113",
",",
"'N'",
":",
"153",
",",
"'K'",
":",
"107",
",",
"'D'",
":",
"172",
",",
"'E'",
":",
"138",
",",
"'M'",
":",
"15",
",",
"'W'",
":",
"61",
",",
"'S'",
":",
"0",
"}",
",",
"'P'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"38",
",",
"'A'",
":",
"27",
",",
"'V'",
":",
"68",
",",
"'G'",
":",
"42",
",",
"'I'",
":",
"95",
",",
"'F'",
":",
"114",
",",
"'Y'",
":",
"110",
",",
"'C'",
":",
"169",
",",
"'H'",
":",
"77",
",",
"'Q'",
":",
"76",
",",
"'N'",
":",
"91",
",",
"'K'",
":",
"103",
",",
"'D'",
":",
"108",
",",
"'E'",
":",
"93",
",",
"'M'",
":",
"87",
",",
"'W'",
":",
"147",
",",
"'S'",
":",
"0",
"}",
",",
"'T'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"58",
",",
"'V'",
":",
"69",
",",
"'G'",
":",
"59",
",",
"'I'",
":",
"89",
",",
"'F'",
":",
"103",
",",
"'Y'",
":",
"92",
",",
"'C'",
":",
"149",
",",
"'H'",
":",
"47",
",",
"'Q'",
":",
"42",
",",
"'N'",
":",
"65",
",",
"'K'",
":",
"78",
",",
"'D'",
":",
"85",
",",
"'E'",
":",
"65",
",",
"'M'",
":",
"81",
",",
"'W'",
":",
"128",
",",
"'S'",
":",
"0",
"}",
",",
"'A'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"64",
",",
"'G'",
":",
"60",
",",
"'I'",
":",
"94",
",",
"'F'",
":",
"113",
",",
"'Y'",
":",
"112",
",",
"'C'",
":",
"195",
",",
"'H'",
":",
"86",
",",
"'Q'",
":",
"91",
",",
"'N'",
":",
"111",
",",
"'K'",
":",
"106",
",",
"'D'",
":",
"126",
",",
"'E'",
":",
"107",
",",
"'M'",
":",
"84",
",",
"'W'",
":",
"148",
",",
"'S'",
":",
"0",
"}",
",",
"'V'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"109",
",",
"'I'",
":",
"29",
",",
"'F'",
":",
"50",
",",
"'Y'",
":",
"55",
",",
"'C'",
":",
"192",
",",
"'H'",
":",
"84",
",",
"'Q'",
":",
"96",
",",
"'N'",
":",
"133",
",",
"'K'",
":",
"97",
",",
"'D'",
":",
"152",
",",
"'E'",
":",
"121",
",",
"'M'",
":",
"21",
",",
"'W'",
":",
"88",
",",
"'S'",
":",
"0",
"}",
",",
"'G'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"135",
",",
"'F'",
":",
"153",
",",
"'Y'",
":",
"147",
",",
"'C'",
":",
"159",
",",
"'H'",
":",
"98",
",",
"'Q'",
":",
"87",
",",
"'N'",
":",
"80",
",",
"'K'",
":",
"127",
",",
"'D'",
":",
"94",
",",
"'E'",
":",
"98",
",",
"'M'",
":",
"127",
",",
"'W'",
":",
"184",
",",
"'S'",
":",
"0",
"}",
",",
"'I'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"21",
",",
"'Y'",
":",
"33",
",",
"'C'",
":",
"198",
",",
"'H'",
":",
"94",
",",
"'Q'",
":",
"109",
",",
"'N'",
":",
"149",
",",
"'K'",
":",
"102",
",",
"'D'",
":",
"168",
",",
"'E'",
":",
"134",
",",
"'M'",
":",
"10",
",",
"'W'",
":",
"61",
",",
"'S'",
":",
"0",
"}",
",",
"'F'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"22",
",",
"'C'",
":",
"205",
",",
"'H'",
":",
"100",
",",
"'Q'",
":",
"116",
",",
"'N'",
":",
"158",
",",
"'K'",
":",
"102",
",",
"'D'",
":",
"177",
",",
"'E'",
":",
"140",
",",
"'M'",
":",
"28",
",",
"'W'",
":",
"40",
",",
"'S'",
":",
"0",
"}",
",",
"'Y'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"194",
",",
"'H'",
":",
"83",
",",
"'Q'",
":",
"99",
",",
"'N'",
":",
"143",
",",
"'K'",
":",
"85",
",",
"'D'",
":",
"160",
",",
"'E'",
":",
"122",
",",
"'M'",
":",
"36",
",",
"'W'",
":",
"37",
",",
"'S'",
":",
"0",
"}",
",",
"'C'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"174",
",",
"'Q'",
":",
"154",
",",
"'N'",
":",
"139",
",",
"'K'",
":",
"202",
",",
"'D'",
":",
"154",
",",
"'E'",
":",
"170",
",",
"'M'",
":",
"196",
",",
"'W'",
":",
"215",
",",
"'S'",
":",
"0",
"}",
",",
"'H'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"24",
",",
"'N'",
":",
"68",
",",
"'K'",
":",
"32",
",",
"'D'",
":",
"81",
",",
"'E'",
":",
"40",
",",
"'M'",
":",
"87",
",",
"'W'",
":",
"115",
",",
"'S'",
":",
"0",
"}",
",",
"'Q'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"46",
",",
"'K'",
":",
"53",
",",
"'D'",
":",
"61",
",",
"'E'",
":",
"29",
",",
"'M'",
":",
"101",
",",
"'W'",
":",
"130",
",",
"'S'",
":",
"0",
"}",
",",
"'N'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"0",
",",
"'K'",
":",
"94",
",",
"'D'",
":",
"23",
",",
"'E'",
":",
"42",
",",
"'M'",
":",
"142",
",",
"'W'",
":",
"174",
",",
"'S'",
":",
"0",
"}",
",",
"'K'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"0",
",",
"'K'",
":",
"0",
",",
"'D'",
":",
"101",
",",
"'E'",
":",
"56",
",",
"'M'",
":",
"95",
",",
"'W'",
":",
"110",
",",
"'S'",
":",
"0",
"}",
",",
"'D'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"0",
",",
"'K'",
":",
"0",
",",
"'D'",
":",
"0",
",",
"'E'",
":",
"45",
",",
"'M'",
":",
"160",
",",
"'W'",
":",
"181",
",",
"'S'",
":",
"0",
"}",
",",
"'E'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"0",
",",
"'K'",
":",
"0",
",",
"'D'",
":",
"0",
",",
"'E'",
":",
"0",
",",
"'M'",
":",
"126",
",",
"'W'",
":",
"152",
",",
"'S'",
":",
"0",
"}",
",",
"'M'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"0",
",",
"'K'",
":",
"0",
",",
"'D'",
":",
"0",
",",
"'E'",
":",
"0",
",",
"'M'",
":",
"0",
",",
"'W'",
":",
"67",
",",
"'S'",
":",
"0",
"}",
",",
"'W'",
":",
"{",
"'R'",
":",
"0",
",",
"'L'",
":",
"0",
",",
"'P'",
":",
"0",
",",
"'T'",
":",
"0",
",",
"'A'",
":",
"0",
",",
"'V'",
":",
"0",
",",
"'G'",
":",
"0",
",",
"'I'",
":",
"0",
",",
"'F'",
":",
"0",
",",
"'Y'",
":",
"0",
",",
"'C'",
":",
"0",
",",
"'H'",
":",
"0",
",",
"'Q'",
":",
"0",
",",
"'N'",
":",
"0",
",",
"'K'",
":",
"0",
",",
"'D'",
":",
"0",
",",
"'E'",
":",
"0",
",",
"'M'",
":",
"0",
",",
"'W'",
":",
"0",
",",
"'S'",
":",
"0",
"}",
"}",
"score",
"=",
"0",
"if",
"ref_aa",
"not",
"in",
"grantham",
"or",
"mut_aa",
"not",
"in",
"grantham",
":",
"log",
".",
"error",
"(",
"'{} to {}: a residue is not in the Grantham matrix'",
".",
"format",
"(",
"ref_aa",
",",
"mut_aa",
")",
")",
"return",
"score",
",",
"'Unknown'",
"if",
"ref_aa",
"==",
"mut_aa",
":",
"return",
"score",
",",
"'Conservative'",
"else",
":",
"if",
"int",
"(",
"grantham",
"[",
"ref_aa",
"]",
"[",
"mut_aa",
"]",
")",
"!=",
"0",
":",
"score",
"+=",
"int",
"(",
"grantham",
"[",
"ref_aa",
"]",
"[",
"mut_aa",
"]",
")",
"else",
":",
"score",
"+=",
"int",
"(",
"grantham",
"[",
"mut_aa",
"]",
"[",
"ref_aa",
"]",
")",
"if",
"score",
">",
"150",
":",
"return",
"score",
",",
"\"Radical\"",
"elif",
"150",
">=",
"score",
">",
"100",
":",
"return",
"score",
",",
"\"Moderately Radical\"",
"elif",
"100",
">=",
"score",
">",
"50",
":",
"return",
"score",
",",
"\"Moderately Conservative\"",
"else",
":",
"return",
"score",
",",
"\"Conservative\""
] | python | https://github.com/ashutoshkpandey/Annotation/blob/master/Grantham_score_calculator.py | false |
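A sketch of the classifier above; scores are read straight from the upper triangle of the matrix (zero entries fall back to the symmetric lookup).

print(grantham_score('S', 'R'))   # (110, 'Moderately Radical')
print(grantham_score('L', 'I'))   # (5, 'Conservative')
print(grantham_score('C', 'W'))   # (215, 'Radical')
print(grantham_score('A', 'A'))   # (0, 'Conservative')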
2,527,575 | def convert(self, path, version, target = None):
"""Converts the specified file using the relevant template.
:arg path: the full path to the file to convert.
:arg version: the new version of the file.
:arg target: the optional path to save the file under. If not
specified, the file is saved based on the template file name.
"""
#Get the template and values out of the XML input file and
#write them in the format of the keywordless file.
values, template = self.parse(path)
lines = template.write(values, version)
#Finally, write the lines to the correct path.
if target is None:
target = os.path.join(os.path.dirname(path), template.name)
with open(os.path.expanduser(target), 'w') as f:
f.write("\n".join(lines)) | [
"def",
"convert",
"(",
"self",
",",
"path",
",",
"version",
",",
"target",
"=",
"None",
")",
":",
"values",
",",
"template",
"=",
"self",
".",
"parse",
"(",
"path",
")",
"lines",
"=",
"template",
".",
"write",
"(",
"values",
",",
"version",
")",
"if",
"target",
"is",
"None",
":",
"target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"template",
".",
"name",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"target",
")",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")"
] | python | Converts the specified file using the relevant template.
:arg path: the full path to the file to convert.
:arg version: the new version of the file.
:arg target: the optional path to save the file under. If not
specified, the file is saved based on the template file name. | false |
1,995,784 | def rmtree(path):
"""A version of rmtree that can deal with read-only files and directories.
Needed because the stock shutil.rmtree() fails with an access error
when there are read-only files in the directory on Windows, or when the
directory itself is read-only on Unix.
"""
def onerror(func, path, exc_info):
        # Did you know that on Python 3.3 on Windows os.remove() and
        # os.unlink() are distinct functions?
if func is os.remove or func is os.unlink or func is os.rmdir:
if sys.platform != 'win32':
chmod_plus(os.path.dirname(path), stat.S_IWUSR | stat.S_IXUSR)
chmod_plus(path)
func(path)
else:
raise
shutil.rmtree(path, onerror=onerror) | [
"def",
"rmtree",
"(",
"path",
")",
":",
"def",
"onerror",
"(",
"func",
",",
"path",
",",
"exc_info",
")",
":",
"if",
"func",
"is",
"os",
".",
"remove",
"or",
"func",
"is",
"os",
".",
"unlink",
"or",
"func",
"is",
"os",
".",
"rmdir",
":",
"if",
"sys",
".",
"platform",
"!=",
"'win32'",
":",
"chmod_plus",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
",",
"stat",
".",
"S_IWUSR",
"|",
"stat",
".",
"S_IXUSR",
")",
"chmod_plus",
"(",
"path",
")",
"func",
"(",
"path",
")",
"else",
":",
"raise",
"shutil",
".",
"rmtree",
"(",
"path",
",",
"onerror",
"=",
"onerror",
")"
] | python | A version of rmtree that can deal with read-only files and directories.
Needed because the stock shutil.rmtree() fails with an access error
when there are read-only files in the directory on Windows, or when the
directory itself is read-only on Unix. | false |
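The onerror hook above retries a failing os.remove/os.unlink/os.rmdir after granting write permission. A self-contained, POSIX-oriented sketch of the same pattern, with the record's chmod_plus helper replaced by explicit os.chmod calls:

# Self-contained POSIX-style demo of the pattern above; assumes
# POSIX permission semantics (Windows read-only handling differs).
import os
import shutil
import stat
import sys
import tempfile

def _onerror(func, path, exc_info):
    # Grant write permission on the parent and the path, then retry once.
    if func in (os.remove, os.unlink, os.rmdir):
        if sys.platform != 'win32':
            parent = os.path.dirname(path)
            os.chmod(parent, os.stat(parent).st_mode | stat.S_IWUSR | stat.S_IXUSR)
        os.chmod(path, os.stat(path).st_mode | stat.S_IWUSR)
        func(path)
    else:
        raise

root = tempfile.mkdtemp()
locked = os.path.join(root, 'locked')
os.mkdir(locked)
open(os.path.join(locked, 'data.txt'), 'w').close()
os.chmod(locked, stat.S_IRUSR | stat.S_IXUSR)  # no write bit: unlink inside fails
shutil.rmtree(root, onerror=_onerror)  # succeeds thanks to the hook
assert not os.path.exists(root)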
2,061,899 | def connections(self):
"""
Gets the Connections API client.
Returns:
Connections:
"""
if not self.__connections:
self.__connections = Connections(
self.__connection)
return self.__connections | [
"def",
"connections",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"__connections",
":",
"self",
".",
"__connections",
"=",
"Connections",
"(",
"self",
".",
"__connection",
")",
"return",
"self",
".",
"__connections"
] | python | Gets the Connections API client.
Returns:
Connections: | false |
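The property above is a standard lazy-initialization cache: the client is built on first access and reused afterwards. A generic sketch of the pattern, with a hypothetical ExpensiveClient standing in for the real Connections client:

# Generic lazy-initialization property; ExpensiveClient is a
# hypothetical stand-in for the real API client.
class ExpensiveClient:
    def __init__(self, connection):
        print('constructed once')
        self.connection = connection

class Api:
    def __init__(self, connection):
        self._connection = connection
        self._client = None

    @property
    def client(self):
        if self._client is None:  # build on first access, cache afterwards
            self._client = ExpensiveClient(self._connection)
        return self._client

api = Api(connection='conn')
api.client  # prints 'constructed once'
api.client  # cached: constructor does not run again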
1,643,138 | def _parse_caps_loader(node):
'''
Parse the <loader> element of the domain capabilities.
'''
enums = [_parse_caps_enum(enum) for enum in node.findall('enum')]
result = {item[0]: item[1] for item in enums if item[0]}
values = [child.text for child in node.findall('value')]
if values:
result['values'] = values
return result | [
"def",
"_parse_caps_loader",
"(",
"node",
")",
":",
"enums",
"=",
"[",
"_parse_caps_enum",
"(",
"enum",
")",
"for",
"enum",
"in",
"node",
".",
"findall",
"(",
"'enum'",
")",
"]",
"result",
"=",
"{",
"item",
"[",
"0",
"]",
":",
"item",
"[",
"1",
"]",
"for",
"item",
"in",
"enums",
"if",
"item",
"[",
"0",
"]",
"}",
"values",
"=",
"[",
"child",
".",
"text",
"for",
"child",
"in",
"node",
".",
"findall",
"(",
"'value'",
")",
"]",
"if",
"values",
":",
"result",
"[",
"'values'",
"]",
"=",
"values",
"return",
"result"
] | python | Parse the <loader> element of the domain capabilities. | false |
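To see what a parser of this shape consumes and returns, here is a hedged standalone sketch run against a made-up libvirt-style sample. The record does not show the real _parse_caps_enum, so its (name, values) return shape is an assumption:

# Standalone sketch; _parse_caps_enum here is an assumed minimal
# helper, and the XML is an illustrative libvirt-style sample.
import xml.etree.ElementTree as ET

def _parse_caps_enum(node):
    return node.get('name'), [child.text for child in node.findall('value')]

def parse_caps_loader(node):
    enums = [_parse_caps_enum(enum) for enum in node.findall('enum')]
    result = {name: vals for name, vals in enums if name}
    values = [child.text for child in node.findall('value')]
    if values:
        result['values'] = values
    return result

xml = ("<loader supported='yes'>"
       "<value>/usr/share/OVMF/OVMF_CODE.fd</value>"
       "<enum name='type'><value>rom</value><value>pflash</value></enum>"
       "</loader>")
print(parse_caps_loader(ET.fromstring(xml)))
# {'type': ['rom', 'pflash'], 'values': ['/usr/share/OVMF/OVMF_CODE.fd']}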
2,504,133 | def update_content_by_id(self, content_data, content_id, callback=None):
"""
Updates a piece of Content, or restores if it is trashed.
The body contains the representation of the content. Must include the new version number.
        To restore a piece of content that has the status of trashed the content must have its version incremented,
and status set to current. No other field modifications will be performed when restoring a piece of content
from the trash.
Request example to restore from trash: { "id": "557059", "status": "current", "version": { "number": 2 } }
:param content_data (dict): The content data (with desired updates). This should be retrieved via the API
call to get content data, then modified to desired state. Required keys are:
"id", "type", "title", "space", "version", and "body".
:param content_id (string): The id of the content to update.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
Example content data:
{
"id": "3604482",
"type": "page",
"title": "Example Content title",
"space": {
"key": "TST"
},
"version": {
"number": 2,
"minorEdit": false
},
"body": {
"storage": {
"value": "<p>This is the updated text for the new page</p>",
"representation": "storage"
}
}
}
"""
assert isinstance(content_data, dict) and set(content_data.keys()) >= self.UPDATE_CONTENT_REQUIRED_KEYS
return self._service_put_request("rest/api/content/{id}".format(id=content_id), data=json.dumps(content_data),
headers={"Content-Type": "application/json"}, callback=callback) | [
"def",
"update_content_by_id",
"(",
"self",
",",
"content_data",
",",
"content_id",
",",
"callback",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"content_data",
",",
"dict",
")",
"and",
"set",
"(",
"content_data",
".",
"keys",
"(",
")",
")",
">=",
"self",
".",
"UPDATE_CONTENT_REQUIRED_KEYS",
"return",
"self",
".",
"_service_put_request",
"(",
"\"rest/api/content/{id}\"",
".",
"format",
"(",
"id",
"=",
"content_id",
")",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"content_data",
")",
",",
"headers",
"=",
"{",
"\"Content-Type\"",
":",
"\"application/json\"",
"}",
",",
"callback",
"=",
"callback",
")"
] | python | Updates a piece of Content, or restores if it is trashed.
The body contains the representation of the content. Must include the new version number.
To restore a piece of content that has the status of trashed the content must have its version incremented,
and status set to current. No other field modifications will be performed when restoring a piece of content
from the trash.
Request example to restore from trash: { "id": "557059", "status": "current", "version": { "number": 2 } }
:param content_data (dict): The content data (with desired updates). This should be retrieved via the API
call to get content data, then modified to desired state. Required keys are:
"id", "type", "title", "space", "version", and "body".
:param content_id (string): The id of the content to update.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the content/{id} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
Example content data:
{
"id": "3604482",
"type": "page",
"title": "Example Content title",
"space": {
"key": "TST"
},
"version": {
"number": 2,
"minorEdit": false
},
"body": {
"storage": {
"value": "<p>This is the updated text for the new page</p>",
"representation": "storage"
}
}
} | false |
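The endpoint and payload shape used above are Confluence's documented content-update REST call. A hedged sketch of the same request made directly with requests, where BASE_URL and the auth pair are placeholders, not real endpoints or credentials:

# Hedged sketch of the same update made with requests directly.
import json
import requests

BASE_URL = 'https://confluence.example.com'  # placeholder
payload = {
    'id': '3604482',
    'type': 'page',
    'title': 'Example Content title',
    'space': {'key': 'TST'},
    'version': {'number': 2, 'minorEdit': False},
    'body': {'storage': {'value': '<p>This is the updated text</p>',
                         'representation': 'storage'}},
}
resp = requests.put(
    '{}/rest/api/content/{}'.format(BASE_URL, payload['id']),
    data=json.dumps(payload),
    headers={'Content-Type': 'application/json'},
    auth=('user', 'api-token'),  # placeholder credentials
)
resp.raise_for_status()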
2,355,509 | def __init__(self, build_json_store, inner_template=None,
outer_template=None, customize_conf=None):
"""
:param build_json_store: str, path to directory with JSON build files
:param inner_template: str, path to inner template JSON
:param outer_template: str, path to outer template JSON
:param customize_conf: str, path to customize configuration JSON
"""
self.spec = BuildSpec()
self.build_json_store = build_json_store
self._inner_template_path = inner_template or DEFAULT_INNER_TEMPLATE
self._outer_template_path = outer_template or DEFAULT_OUTER_TEMPLATE
self._customize_conf_path = customize_conf or DEFAULT_CUSTOMIZE_CONF
self.build_json = None # rendered template
self._template = None # template loaded from filesystem
self._inner_template = None # dock json
self._customize_conf = None # site customize conf for _inner_template
self._dj = None
self._resource_limits = None
self._openshift_required_version = parse_version('1.0.6')
self._repo_info = None
# For the koji "scratch" build type
self.scratch = None
self.isolated = None
self.is_auto = None
self.base_image = None
self.scratch_build_node_selector = None
self.explicit_build_node_selector = None
self.auto_build_node_selector = None
self.isolated_build_node_selector = None
self.is_auto = None
# forward reference
self.platform_node_selector = None
self.platform_descriptors = None | [
"def",
"__init__",
"(",
"self",
",",
"build_json_store",
",",
"inner_template",
"=",
"None",
",",
"outer_template",
"=",
"None",
",",
"customize_conf",
"=",
"None",
")",
":",
"self",
".",
"spec",
"=",
"BuildSpec",
"(",
")",
"self",
".",
"build_json_store",
"=",
"build_json_store",
"self",
".",
"_inner_template_path",
"=",
"inner_template",
"or",
"DEFAULT_INNER_TEMPLATE",
"self",
".",
"_outer_template_path",
"=",
"outer_template",
"or",
"DEFAULT_OUTER_TEMPLATE",
"self",
".",
"_customize_conf_path",
"=",
"customize_conf",
"or",
"DEFAULT_CUSTOMIZE_CONF",
"self",
".",
"build_json",
"=",
"None",
"self",
".",
"_template",
"=",
"None",
"self",
".",
"_inner_template",
"=",
"None",
"self",
".",
"_customize_conf",
"=",
"None",
"self",
".",
"_dj",
"=",
"None",
"self",
".",
"_resource_limits",
"=",
"None",
"self",
".",
"_openshift_required_version",
"=",
"parse_version",
"(",
"'1.0.6'",
")",
"self",
".",
"_repo_info",
"=",
"None",
"self",
".",
"scratch",
"=",
"None",
"self",
".",
"isolated",
"=",
"None",
"self",
".",
"is_auto",
"=",
"None",
"self",
".",
"base_image",
"=",
"None",
"self",
".",
"scratch_build_node_selector",
"=",
"None",
"self",
".",
"explicit_build_node_selector",
"=",
"None",
"self",
".",
"auto_build_node_selector",
"=",
"None",
"self",
".",
"isolated_build_node_selector",
"=",
"None",
"self",
".",
"is_auto",
"=",
"None",
"self",
".",
"platform_node_selector",
"=",
"None",
"self",
".",
"platform_descriptors",
"=",
"None"
] | python | :param build_json_store: str, path to directory with JSON build files
:param inner_template: str, path to inner template JSON
:param outer_template: str, path to outer template JSON
:param customize_conf: str, path to customize configuration JSON | false |
2,073,508 | def delete_order(self, order_id):
"""Deletes an existing order transaction."""
request = self._delete("transactions/orders/" + str(order_id))
return self.responder(request) | [
"def",
"delete_order",
"(",
"self",
",",
"order_id",
")",
":",
"request",
"=",
"self",
".",
"_delete",
"(",
"\"transactions/orders/\"",
"+",
"str",
"(",
"order_id",
")",
")",
"return",
"self",
".",
"responder",
"(",
"request",
")"
] | python | Deletes an existing order transaction. | false |
1,710,922 | def lengths_and_angles(self) -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]:
"""
Returns (lattice lengths, lattice angles).
"""
return self.lengths, self.angles | [
"def",
"lengths_and_angles",
"(",
"self",
")",
"->",
"Tuple",
"[",
"Tuple",
"[",
"float",
",",
"float",
",",
"float",
"]",
",",
"Tuple",
"[",
"float",
",",
"float",
",",
"float",
"]",
"]",
":",
"return",
"self",
".",
"lengths",
",",
"self",
".",
"angles"
] | python | Returns (lattice lengths, lattice angles). | false |
2,575,230 | def quit(self):
"""QUIT command.
Tells the server to close the connection. After the server acknowledges
        the request to quit, the connection is closed both at the server and
client. Only useful for graceful shutdown. If you are in a generator
use close() instead.
Once this method has been called, no other methods of the NNTPClient
object should be called.
See <http://tools.ietf.org/html/rfc3977#section-5.4>
"""
code, message = self.command("QUIT")
if code != 205:
raise NNTPReplyError(code, message)
self.socket.close() | [
"def",
"quit",
"(",
"self",
")",
":",
"code",
",",
"message",
"=",
"self",
".",
"command",
"(",
"\"QUIT\"",
")",
"if",
"code",
"!=",
"205",
":",
"raise",
"NNTPReplyError",
"(",
"code",
",",
"message",
")",
"self",
".",
"socket",
".",
"close",
"(",
")"
] | python | QUIT command.
Tells the server to close the connection. After the server acknowledges
the request to quit, the connection is closed both at the server and
client. Only useful for graceful shutdown. If you are in a generator
use close() instead.
Once this method has been called, no other methods of the NNTPClient
object should be called.
See <http://tools.ietf.org/html/rfc3977#section-5.4> | false |
2,184,416 | def dumps(xs, model=None, properties=False, indent=True, **kwargs):
"""
Serialize Xmrs (or subclass) objects to PENMAN notation
Args:
xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to
serialize
model: Xmrs subclass used to get triples
properties: if `True`, encode variable properties
indent: if `True`, adaptively indent; if `False` or `None`,
don't indent; if a non-negative integer N, indent N spaces
per level
Returns:
the PENMAN serialization of *xs*
"""
xs = list(xs)
if not xs:
return ''
given_class = xs[0].__class__ # assume they are all the same
if model is None:
model = xs[0].__class__
if not hasattr(model, 'to_triples'):
raise TypeError(
'{} class does not implement to_triples()'.format(model.__name__)
)
# convert MRS to DMRS if necessary; EDS cannot convert
if given_class.__name__ in ('Mrs', 'Xmrs'):
xs = [model.from_xmrs(x, **kwargs) for x in xs]
elif given_class.__name__ == 'Eds' and model.__name__ != 'Eds':
raise ValueError('Cannot convert EDS to non-EDS')
codec = XMRSCodec()
graphs = [
codec.triples_to_graph(model.to_triples(x, properties=properties))
for x in xs
]
if 'pretty_print' in kwargs:
indent = kwargs['pretty_print']
return penman.dumps(graphs, cls=XMRSCodec, indent=indent) | [
"def",
"dumps",
"(",
"xs",
",",
"model",
"=",
"None",
",",
"properties",
"=",
"False",
",",
"indent",
"=",
"True",
",",
"**",
"kwargs",
")",
":",
"xs",
"=",
"list",
"(",
"xs",
")",
"if",
"not",
"xs",
":",
"return",
"''",
"given_class",
"=",
"xs",
"[",
"0",
"]",
".",
"__class__",
"if",
"model",
"is",
"None",
":",
"model",
"=",
"xs",
"[",
"0",
"]",
".",
"__class__",
"if",
"not",
"hasattr",
"(",
"model",
",",
"'to_triples'",
")",
":",
"raise",
"TypeError",
"(",
"'{} class does not implement to_triples()'",
".",
"format",
"(",
"model",
".",
"__name__",
")",
")",
"if",
"given_class",
".",
"__name__",
"in",
"(",
"'Mrs'",
",",
"'Xmrs'",
")",
":",
"xs",
"=",
"[",
"model",
".",
"from_xmrs",
"(",
"x",
",",
"**",
"kwargs",
")",
"for",
"x",
"in",
"xs",
"]",
"elif",
"given_class",
".",
"__name__",
"==",
"'Eds'",
"and",
"model",
".",
"__name__",
"!=",
"'Eds'",
":",
"raise",
"ValueError",
"(",
"'Cannot convert EDS to non-EDS'",
")",
"codec",
"=",
"XMRSCodec",
"(",
")",
"graphs",
"=",
"[",
"codec",
".",
"triples_to_graph",
"(",
"model",
".",
"to_triples",
"(",
"x",
",",
"properties",
"=",
"properties",
")",
")",
"for",
"x",
"in",
"xs",
"]",
"if",
"'pretty_print'",
"in",
"kwargs",
":",
"indent",
"=",
"kwargs",
"[",
"'pretty_print'",
"]",
"return",
"penman",
".",
"dumps",
"(",
"graphs",
",",
"cls",
"=",
"XMRSCodec",
",",
"indent",
"=",
"indent",
")"
] | python | Serialize Xmrs (or subclass) objects to PENMAN notation
Args:
xs: iterator of :class:`~delphin.mrs.xmrs.Xmrs` objects to
serialize
model: Xmrs subclass used to get triples
properties: if `True`, encode variable properties
indent: if `True`, adaptively indent; if `False` or `None`,
don't indent; if a non-negative integer N, indent N spaces
per level
Returns:
the PENMAN serialization of *xs* | false |
2,477,480 | def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Order):
return False
return self.__dict__ == other.__dict__ | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"Order",
")",
":",
"return",
"False",
"return",
"self",
".",
"__dict__",
"==",
"other",
".",
"__dict__"
] | python | Returns true if both objects are equal | false |
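Comparing via __dict__ as above is concise, but in Python 3 a class that defines __eq__ without __hash__ becomes unhashable (its __hash__ is set to None), which matters if instances go into sets or dict keys. A small sketch of the trade-off on a hypothetical class:

# Defining __eq__ alone disables hashing in Python 3; pairing it with
# a consistent __hash__ keeps instances usable in sets and dict keys.
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __eq__(self, other):
        if not isinstance(other, Point):
            return False
        return self.__dict__ == other.__dict__

    def __hash__(self):
        return hash((self.x, self.y))  # consistent with __eq__

assert Point(1, 2) == Point(1, 2)
assert len({Point(1, 2), Point(1, 2)}) == 1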
2,145,444 | def _concat(self, egdfs):
"""
Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data
"""
egdfs = list(egdfs)
edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False)
        # groupby can mix up the rows. We try to maintain the original
        # order, but we can only do that if the result has a one-to-one
        # relationship with the original
one2one = (
self.keep_index and
not any(edata.index.duplicated()) and
len(edata.index) == len(self.data.index))
if one2one:
edata = edata.sort_index()
else:
edata.reset_index(drop=True, inplace=True)
# Maybe this should happen in the verb functions
if self.keep_groups and self.groups:
edata = GroupedDataFrame(edata, groups=self.groups)
return edata | [
"def",
"_concat",
"(",
"self",
",",
"egdfs",
")",
":",
"egdfs",
"=",
"list",
"(",
"egdfs",
")",
"edata",
"=",
"pd",
".",
"concat",
"(",
"egdfs",
",",
"axis",
"=",
"0",
",",
"ignore_index",
"=",
"False",
",",
"copy",
"=",
"False",
")",
"one2one",
"=",
"(",
"self",
".",
"keep_index",
"and",
"not",
"any",
"(",
"edata",
".",
"index",
".",
"duplicated",
"(",
")",
")",
"and",
"len",
"(",
"edata",
".",
"index",
")",
"==",
"len",
"(",
"self",
".",
"data",
".",
"index",
")",
")",
"if",
"one2one",
":",
"edata",
"=",
"edata",
".",
"sort_index",
"(",
")",
"else",
":",
"edata",
".",
"reset_index",
"(",
"drop",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"if",
"self",
".",
"keep_groups",
"and",
"self",
".",
"groups",
":",
"edata",
"=",
"GroupedDataFrame",
"(",
"edata",
",",
"groups",
"=",
"self",
".",
"groups",
")",
"return",
"edata"
] | python | Concatenate evaluated group dataframes
Parameters
----------
egdfs : iterable
Evaluated dataframes
Returns
-------
edata : pandas.DataFrame
Evaluated data | false |
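The concat-then-sort_index step above restores the pre-groupby row order whenever the result maps one-to-one onto the input. A plain-pandas illustration (GroupedDataFrame from the record is omitted):

# Plain-pandas illustration of the reordering logic above.
import pandas as pd

data = pd.DataFrame({'g': ['b', 'a', 'b', 'a'], 'x': [1, 2, 3, 4]})
# Per-group evaluation returns one frame per group, in group order ...
egdfs = [gdf.assign(total=gdf['x'].sum()) for _, gdf in data.groupby('g')]
edata = pd.concat(egdfs, axis=0, ignore_index=False, copy=False)
# ... which shuffles rows; a unique index of equal length lets us undo that.
one2one = (not edata.index.duplicated().any()
           and len(edata.index) == len(data.index))
if one2one:
    edata = edata.sort_index()  # back to the original row order
print(edata)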
1,840,442 | def get_services_uids(context=None, analyses_serv=None, values=None):
"""
    This function returns a list of analyses services UIDs derived from its
    parameters.
:param analyses_serv: A list (or one object) of service-related info items.
see _resolve_items_to_service_uids() docstring.
:type analyses_serv: list
:param values: a dict, where keys are AR|Sample schema field names.
:type values: dict
:returns: a list of analyses services UIDs
"""
if not analyses_serv:
analyses_serv = []
if not values:
values = {}
if not context or (not analyses_serv and not values):
raise RuntimeError(
"get_services_uids: Missing or wrong parameters.")
# Merge analyses from analyses_serv and values into one list
analyses_services = analyses_serv + (values.get("Analyses", None) or [])
# It is possible to create analysis requests
    # via JSON requests, and services, profiles or types aren't always sent.
    # Sometimes we can get analyses and profiles that don't match and we
    # should act accordingly.
# Getting the analyses profiles
analyses_profiles = values.get('Profiles', [])
if not isinstance(analyses_profiles, (list, tuple)):
# Plone converts the incoming form value to a list, if there are
# multiple values; but if not, it will send a string (a single UID).
analyses_profiles = [analyses_profiles]
if not analyses_services and not analyses_profiles:
return []
# Add analysis services UIDs from profiles to analyses_services variable.
if analyses_profiles:
uid_catalog = getToolByName(context, 'uid_catalog')
for brain in uid_catalog(UID=analyses_profiles):
profile = api.get_object(brain)
# Only services UIDs
services_uids = profile.getRawService()
# _resolve_items_to_service_uids() will remove duplicates
analyses_services += services_uids
return _resolve_items_to_service_uids(analyses_services) | [
"def",
"get_services_uids",
"(",
"context",
"=",
"None",
",",
"analyses_serv",
"=",
"None",
",",
"values",
"=",
"None",
")",
":",
"if",
"not",
"analyses_serv",
":",
"analyses_serv",
"=",
"[",
"]",
"if",
"not",
"values",
":",
"values",
"=",
"{",
"}",
"if",
"not",
"context",
"or",
"(",
"not",
"analyses_serv",
"and",
"not",
"values",
")",
":",
"raise",
"RuntimeError",
"(",
"\"get_services_uids: Missing or wrong parameters.\"",
")",
"analyses_services",
"=",
"analyses_serv",
"+",
"(",
"values",
".",
"get",
"(",
"\"Analyses\"",
",",
"None",
")",
"or",
"[",
"]",
")",
"analyses_profiles",
"=",
"values",
".",
"get",
"(",
"'Profiles'",
",",
"[",
"]",
")",
"if",
"not",
"isinstance",
"(",
"analyses_profiles",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"analyses_profiles",
"=",
"[",
"analyses_profiles",
"]",
"if",
"not",
"analyses_services",
"and",
"not",
"analyses_profiles",
":",
"return",
"[",
"]",
"if",
"analyses_profiles",
":",
"uid_catalog",
"=",
"getToolByName",
"(",
"context",
",",
"'uid_catalog'",
")",
"for",
"brain",
"in",
"uid_catalog",
"(",
"UID",
"=",
"analyses_profiles",
")",
":",
"profile",
"=",
"api",
".",
"get_object",
"(",
"brain",
")",
"services_uids",
"=",
"profile",
".",
"getRawService",
"(",
")",
"analyses_services",
"+=",
"services_uids",
"return",
"_resolve_items_to_service_uids",
"(",
"analyses_services",
")"
] | python | This function returns a list of analyses services UIDs derived from its
parameters.
:param analyses_serv: A list (or one object) of service-related info items.
see _resolve_items_to_service_uids() docstring.
:type analyses_serv: list
:param values: a dict, where keys are AR|Sample schema field names.
:type values: dict
:returns: a list of analyses services UIDs | false |
1,739,143 | def create_packet(reqid, message):
"""Creates Outgoing Packet from a given reqid and message
:param reqid: REQID object
:param message: protocol buffer object
"""
assert message.IsInitialized()
packet = ''
  # calculate the total size of the packet incl. header
typename = message.DESCRIPTOR.full_name
datasize = HeronProtocol.get_size_to_pack_string(typename) + \
REQID.REQID_SIZE + HeronProtocol.get_size_to_pack_message(message)
# first write out how much data is there as the header
packet += HeronProtocol.pack_int(datasize)
# next write the type string
packet += HeronProtocol.pack_int(len(typename))
packet += typename
# reqid
packet += reqid.pack()
# add the proto
packet += HeronProtocol.pack_int(message.ByteSize())
packet += message.SerializeToString()
return OutgoingPacket(packet) | [
"def",
"create_packet",
"(",
"reqid",
",",
"message",
")",
":",
"assert",
"message",
".",
"IsInitialized",
"(",
")",
"packet",
"=",
"''",
"typename",
"=",
"message",
".",
"DESCRIPTOR",
".",
"full_name",
"datasize",
"=",
"HeronProtocol",
".",
"get_size_to_pack_string",
"(",
"typename",
")",
"+",
"REQID",
".",
"REQID_SIZE",
"+",
"HeronProtocol",
".",
"get_size_to_pack_message",
"(",
"message",
")",
"packet",
"+=",
"HeronProtocol",
".",
"pack_int",
"(",
"datasize",
")",
"packet",
"+=",
"HeronProtocol",
".",
"pack_int",
"(",
"len",
"(",
"typename",
")",
")",
"packet",
"+=",
"typename",
"packet",
"+=",
"reqid",
".",
"pack",
"(",
")",
"packet",
"+=",
"HeronProtocol",
".",
"pack_int",
"(",
"message",
".",
"ByteSize",
"(",
")",
")",
"packet",
"+=",
"message",
".",
"SerializeToString",
"(",
")",
"return",
"OutgoingPacket",
"(",
"packet",
")"
] | python | Creates Outgoing Packet from a given reqid and message
:param reqid: REQID object
:param message: protocol buffer object | false |
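The packet layout above is length-prefixed framing: a total-size header, then the typename, the REQID, and the serialized message, each length-prefixed in turn. A generic sketch with struct, assuming pack_int is a big-endian 4-byte integer and substituting plain bytes for the protobuf message and the (assumed 32-byte) REQID:

# Generic length-prefixed framing; the typename and payloads here are
# illustrative stand-ins, not real Heron protocol values.
import struct

def pack_int(value):
    return struct.pack('>i', value)

def frame(typename, reqid, payload):
    body = (pack_int(len(typename)) + typename.encode('ascii')
            + reqid
            + pack_int(len(payload)) + payload)
    return pack_int(len(body)) + body  # total-size header first

packet = frame('heron.proto.Request', b'\x00' * 32, b'serialized-proto')
size, = struct.unpack('>i', packet[:4])
assert size == len(packet) - 4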
1,741,235 | def __init__(self, document_body=None,
transport=None, **kwargs):
"""
Create Grab instance
"""
self.meta = {}
self._doc = None
self.config = default_config()
self.config['common_headers'] = self.common_headers()
self.cookies = CookieManager()
self.proxylist = ProxyList()
self.exception = None
# makes pylint happy
self.request_counter = None
self.request_head = None
self.request_body = None
self.request_method = None
self.transport_param = transport
self.transport = None
self.reset()
if kwargs:
self.setup(**kwargs)
if document_body is not None:
self.setup_document(document_body) | [
"def",
"__init__",
"(",
"self",
",",
"document_body",
"=",
"None",
",",
"transport",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"self",
".",
"meta",
"=",
"{",
"}",
"self",
".",
"_doc",
"=",
"None",
"self",
".",
"config",
"=",
"default_config",
"(",
")",
"self",
".",
"config",
"[",
"'common_headers'",
"]",
"=",
"self",
".",
"common_headers",
"(",
")",
"self",
".",
"cookies",
"=",
"CookieManager",
"(",
")",
"self",
".",
"proxylist",
"=",
"ProxyList",
"(",
")",
"self",
".",
"exception",
"=",
"None",
"self",
".",
"request_counter",
"=",
"None",
"self",
".",
"request_head",
"=",
"None",
"self",
".",
"request_body",
"=",
"None",
"self",
".",
"request_method",
"=",
"None",
"self",
".",
"transport_param",
"=",
"transport",
"self",
".",
"transport",
"=",
"None",
"self",
".",
"reset",
"(",
")",
"if",
"kwargs",
":",
"self",
".",
"setup",
"(",
"**",
"kwargs",
")",
"if",
"document_body",
"is",
"not",
"None",
":",
"self",
".",
"setup_document",
"(",
"document_body",
")"
] | python | Create Grab instance | false |
1,991,197 | def _get_preset_id(package, size):
"""Get the preset id given the keyName of the preset."""
for preset in package['activePresets'] + package['accountRestrictedActivePresets']:
if preset['keyName'] == size or preset['id'] == size:
return preset['id']
raise SoftLayer.SoftLayerError("Could not find valid size for: '%s'" % size) | [
"def",
"_get_preset_id",
"(",
"package",
",",
"size",
")",
":",
"for",
"preset",
"in",
"package",
"[",
"'activePresets'",
"]",
"+",
"package",
"[",
"'accountRestrictedActivePresets'",
"]",
":",
"if",
"preset",
"[",
"'keyName'",
"]",
"==",
"size",
"or",
"preset",
"[",
"'id'",
"]",
"==",
"size",
":",
"return",
"preset",
"[",
"'id'",
"]",
"raise",
"SoftLayer",
".",
"SoftLayerError",
"(",
"\"Could not find valid size for: '%s'\"",
"%",
"size",
")"
] | python | Get the preset id given the keyName of the preset. | false |
2,213,277 | def cmp_public_numbers(pn1, pn2):
"""
    Compare 2 sets of public numbers. This is a way to compare
2 public RSA keys. If the sets are the same then the keys are the same.
:param pn1: The set of values belonging to the 1st key
:param pn2: The set of values belonging to the 2nd key
:return: True is the sets are the same otherwise False.
"""
if pn1.n == pn2.n:
if pn1.e == pn2.e:
return True
return False | [
"def",
"cmp_public_numbers",
"(",
"pn1",
",",
"pn2",
")",
":",
"if",
"pn1",
".",
"n",
"==",
"pn2",
".",
"n",
":",
"if",
"pn1",
".",
"e",
"==",
"pn2",
".",
"e",
":",
"return",
"True",
"return",
"False"
] | python | Compare 2 sets of public numbers. This is a way to compare
2 public RSA keys. If the sets are the same then the keys are the same.
:param pn1: The set of values belonging to the 1st key
:param pn2: The set of values belonging to the 2nd key
:return: True if the sets are the same, otherwise False. | false |
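With the cryptography package, the public-numbers objects this comparison expects can be obtained as below; the key-generation parameters are illustrative, and recent package versions are assumed (where the backend argument to generate_private_key is optional):

# Usage sketch with the `cryptography` package (recent versions).
from cryptography.hazmat.primitives.asymmetric import rsa

def cmp_public_numbers(pn1, pn2):
    return pn1.n == pn2.n and pn1.e == pn2.e

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pn_a = key.public_key().public_numbers()
pn_b = key.public_key().public_numbers()
other = rsa.generate_private_key(public_exponent=65537, key_size=2048)
assert cmp_public_numbers(pn_a, pn_b)
assert not cmp_public_numbers(pn_a, other.public_key().public_numbers())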
2,491,330 | def pysal_Moran(self, **kwargs):
"""
Compute Moran's I measure of global spatial autocorrelation for GeoRaster
Usage:
geo.pysal_Moran(permutations = 1000, rook=True)
arguments passed to raster_weights() and pysal.Moran
See help(gr.raster_weights), help(pysal.Moran) for options
"""
if self.weights is None:
self.raster_weights(**kwargs)
rasterf = self.raster.flatten()
rasterf = rasterf[rasterf.mask==False]
self.Moran = pysal.Moran(rasterf, self.weights, **kwargs) | [
"def",
"pysal_Moran",
"(",
"self",
",",
"**",
"kwargs",
")",
":",
"if",
"self",
".",
"weights",
"is",
"None",
":",
"self",
".",
"raster_weights",
"(",
"**",
"kwargs",
")",
"rasterf",
"=",
"self",
".",
"raster",
".",
"flatten",
"(",
")",
"rasterf",
"=",
"rasterf",
"[",
"rasterf",
".",
"mask",
"==",
"False",
"]",
"self",
".",
"Moran",
"=",
"pysal",
".",
"Moran",
"(",
"rasterf",
",",
"self",
".",
"weights",
",",
"**",
"kwargs",
")"
] | python | Compute Moran's I measure of global spatial autocorrelation for GeoRaster
Usage:
geo.pysal_Moran(permutations = 1000, rook=True)
arguments passed to raster_weights() and pysal.Moran
See help(gr.raster_weights), help(pysal.Moran) for options | false |
2,197,417 | def __init__(self, quality=Kinect2BridgedQuality.HD, frame='kinect2_rgb_optical_frame'):
"""Initialize a Kinect v2 sensor which connects to the iai_kinect2 bridge
----------
quality : :obj:`str`
The quality (HD, Quarter-HD, SD) of the image data that should be subscribed to
frame : :obj:`str`
The name of the frame of reference in which the sensor resides.
If None, this will be set to 'kinect2_rgb_optical_frame'
"""
# set member vars
self._frame = frame
self.topic_image_color = '/kinect2/%s/image_color_rect' %(quality)
self.topic_image_depth = '/kinect2/%s/image_depth_rect' %(quality)
self.topic_info_camera = '/kinect2/%s/camera_info' %(quality)
self._initialized = False
self._format = None
self._camera_intr = None
self._cur_depth_im = None
self._running = False
self._bridge = CvBridge() | [
"def",
"__init__",
"(",
"self",
",",
"quality",
"=",
"Kinect2BridgedQuality",
".",
"HD",
",",
"frame",
"=",
"'kinect2_rgb_optical_frame'",
")",
":",
"self",
".",
"_frame",
"=",
"frame",
"self",
".",
"topic_image_color",
"=",
"'/kinect2/%s/image_color_rect'",
"%",
"(",
"quality",
")",
"self",
".",
"topic_image_depth",
"=",
"'/kinect2/%s/image_depth_rect'",
"%",
"(",
"quality",
")",
"self",
".",
"topic_info_camera",
"=",
"'/kinect2/%s/camera_info'",
"%",
"(",
"quality",
")",
"self",
".",
"_initialized",
"=",
"False",
"self",
".",
"_format",
"=",
"None",
"self",
".",
"_camera_intr",
"=",
"None",
"self",
".",
"_cur_depth_im",
"=",
"None",
"self",
".",
"_running",
"=",
"False",
"self",
".",
"_bridge",
"=",
"CvBridge",
"(",
")"
] | python | Initialize a Kinect v2 sensor which connects to the iai_kinect2 bridge
Parameters
----------
quality : :obj:`str`
The quality (HD, Quarter-HD, SD) of the image data that should be subscribed to
frame : :obj:`str`
The name of the frame of reference in which the sensor resides.
If None, this will be set to 'kinect2_rgb_optical_frame' | false |
1,575,248 | def label_empty(self, **kwargs):
"Label every item with an `EmptyLabel`."
kwargs['label_cls'] = EmptyLabelList
return self.label_from_func(func=lambda o: 0., **kwargs) | [
"def",
"label_empty",
"(",
"self",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'label_cls'",
"]",
"=",
"EmptyLabelList",
"return",
"self",
".",
"label_from_func",
"(",
"func",
"=",
"lambda",
"o",
":",
"0.",
",",
"**",
"kwargs",
")"
] | python | Label every item with an `EmptyLabel`. | false |
1,804,648 | def make_hashcode(uri, payload, headers):
"""Generate a SHA1 based on the given arguments.
Hashcodes created by this method will used as unique identifiers
for the raw items or resources stored by this archive.
:param uri: URI to the resource
:param payload: payload of the request needed to fetch the resource
:param headers: headers of the request needed to fetch the resource
:returns: a SHA1 hash code
"""
def dict_to_json_str(data):
return json.dumps(data, sort_keys=True)
content = ':'.join([uri, dict_to_json_str(payload), dict_to_json_str(headers)])
hashcode = hashlib.sha1(content.encode('utf-8'))
return hashcode.hexdigest() | [
"def",
"make_hashcode",
"(",
"uri",
",",
"payload",
",",
"headers",
")",
":",
"def",
"dict_to_json_str",
"(",
"data",
")",
":",
"return",
"json",
".",
"dumps",
"(",
"data",
",",
"sort_keys",
"=",
"True",
")",
"content",
"=",
"':'",
".",
"join",
"(",
"[",
"uri",
",",
"dict_to_json_str",
"(",
"payload",
")",
",",
"dict_to_json_str",
"(",
"headers",
")",
"]",
")",
"hashcode",
"=",
"hashlib",
".",
"sha1",
"(",
"content",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"return",
"hashcode",
".",
"hexdigest",
"(",
")"
] | python | Generate a SHA1 based on the given arguments.
Hashcodes created by this method will be used as unique identifiers
for the raw items or resources stored by this archive.
:param uri: URI to the resource
:param payload: payload of the request needed to fetch the resource
:param headers: headers of the request needed to fetch the resource
:returns: a SHA1 hash code | false |
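Because of sort_keys=True, the hashcode is insensitive to dict key order, which is what makes it usable as a stable cache identifier. A runnable restatement:

# Identical requests hash identically regardless of dict key order.
import hashlib
import json

def make_hashcode(uri, payload, headers):
    def dict_to_json_str(data):
        return json.dumps(data, sort_keys=True)
    content = ':'.join([uri, dict_to_json_str(payload), dict_to_json_str(headers)])
    return hashlib.sha1(content.encode('utf-8')).hexdigest()

a = make_hashcode('https://example.com/api', {'page': 1, 'q': 'x'}, {'Accept': 'json'})
b = make_hashcode('https://example.com/api', {'q': 'x', 'page': 1}, {'Accept': 'json'})
assert a == b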
1,948,632 | def __init__(self,
hostname=None,
port=None,
cert=None,
key=None,
ca=None,
ssl_version=None,
username=None,
password=None,
config='client',
config_file=None,
kmip_version=None):
"""
Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
config_file (string): The path to the client's configuration file.
Optional, defaults to None.
kmip_version (KMIPVersion): The KMIP version the client should use
when making requests. Optional, defaults to None. If None at
request time, the client will use KMIP 1.2.
"""
self.logger = logging.getLogger(__name__)
self.attribute_factory = attributes.AttributeFactory()
self.object_factory = factory.ObjectFactory()
# TODO (peter-hamilton) Consider adding validation checks for inputs.
self.proxy = KMIPProxy(
host=hostname,
port=port,
certfile=cert,
keyfile=key,
ca_certs=ca,
ssl_version=ssl_version,
username=username,
password=password,
config=config,
config_file=config_file,
kmip_version=kmip_version
)
# TODO (peter-hamilton) Add a multiprocessing lock for synchronization.
self._is_open = False | [
"def",
"__init__",
"(",
"self",
",",
"hostname",
"=",
"None",
",",
"port",
"=",
"None",
",",
"cert",
"=",
"None",
",",
"key",
"=",
"None",
",",
"ca",
"=",
"None",
",",
"ssl_version",
"=",
"None",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"config",
"=",
"'client'",
",",
"config_file",
"=",
"None",
",",
"kmip_version",
"=",
"None",
")",
":",
"self",
".",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"self",
".",
"attribute_factory",
"=",
"attributes",
".",
"AttributeFactory",
"(",
")",
"self",
".",
"object_factory",
"=",
"factory",
".",
"ObjectFactory",
"(",
")",
"self",
".",
"proxy",
"=",
"KMIPProxy",
"(",
"host",
"=",
"hostname",
",",
"port",
"=",
"port",
",",
"certfile",
"=",
"cert",
",",
"keyfile",
"=",
"key",
",",
"ca_certs",
"=",
"ca",
",",
"ssl_version",
"=",
"ssl_version",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"config",
"=",
"config",
",",
"config_file",
"=",
"config_file",
",",
"kmip_version",
"=",
"kmip_version",
")",
"self",
".",
"_is_open",
"=",
"False"
] | python | Construct a ProxyKmipClient.
Args:
hostname (string): The host or IP address of a KMIP appliance.
Optional, defaults to None.
port (int): The port number used to establish a connection to a
KMIP appliance. Usually 5696 for KMIP applications. Optional,
defaults to None.
cert (string): The path to the client's certificate. Optional,
defaults to None.
key (string): The path to the key for the client's certificate.
Optional, defaults to None.
ca (string): The path to the CA certificate used to verify the
server's certificate. Optional, defaults to None.
ssl_version (string): The name of the ssl version to use for the
connection. Example: 'PROTOCOL_SSLv23'. Optional, defaults to
None.
username (string): The username of the KMIP appliance account to
use for operations. Optional, defaults to None.
password (string): The password of the KMIP appliance account to
use for operations. Optional, defaults to None.
config (string): The name of a section in the PyKMIP configuration
file. Use to load a specific set of configuration settings from
the configuration file, instead of specifying them manually.
Optional, defaults to the default client section, 'client'.
config_file (string): The path to the client's configuration file.
Optional, defaults to None.
kmip_version (KMIPVersion): The KMIP version the client should use
when making requests. Optional, defaults to None. If None at
request time, the client will use KMIP 1.2. | false |
2,447,701 | def get_chain_details_by_related_pdb_chains(self, pdb_id, chain_id, pfam_accs):
''' Returns a dict of SCOPe details using info
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information.
'''
if not pfam_accs:
return None
associated_pdb_chains = set()
pfam_api = self.get_pfam_api()
for pfam_acc in pfam_accs:
associated_pdb_chains = associated_pdb_chains.union(pfam_api.get_pdb_chains_from_pfam_accession_number(pfam_acc))
hits = []
#class_count = {}
pfam_scop_mapping = {}
for pdb_chain_pair in associated_pdb_chains:
ass_pdb_id, ass_chain_id = pdb_chain_pair[0], pdb_chain_pair[1]
hit = self.get_chain_details(ass_pdb_id, chain = ass_chain_id, internal_function_call = True, pfam_scop_mapping = pfam_scop_mapping)
if hit and hit.get('chains'):
assert(len(hit['chains']) == 1)
hits.append(hit['chains'][ass_chain_id])
#for k, v in hit.iteritems():
#class_count[v['sccs']] = class_count.get(v['sccs'], 0)
#class_count[v['sccs']] += 1
#print(' %s, %s: %s' % (v['pdb_id'], k, v['sccs']))
#pprint.pprint(class_count)
allowed_scop_domains = map(int, map(set.intersection, pfam_scop_mapping.values())[0])
allowed_scop_domains = list(set((allowed_scop_domains or []) + (self.get_sunid_for_pfam_accs(pfam_accs) or [])))
filtered_hits = []
print(pfam_accs)
print(allowed_scop_domains)
print('%d hits' % len(hits))
for hit in hits:
domains_to_ignore = []
for k, v in hit['domains'].iteritems():
if v['sunid'] in allowed_scop_domains:
filtered_hits.append(v)
print('%d filtered_hits' % len(filtered_hits))
if not filtered_hits:
return None
d = self.get_basic_pdb_chain_information(pdb_id, chain_id)
d.update(self.get_common_fields(filtered_hits))
d.update(dict(
SCOPe_sources = 'Pfam + SCOPe',
SCOPe_search_fields = 'Pfam + link_pdb.pdb_chain_id',
SCOPe_trust_level = 3
))
# Add the lowest common classification over all related Pfam families
for k, v in sorted(self.levels.iteritems()):
d[v] = None
d.update(dict(self.get_common_hierarchy(filtered_hits)))
return d | [
"def",
"get_chain_details_by_related_pdb_chains",
"(",
"self",
",",
"pdb_id",
",",
"chain_id",
",",
"pfam_accs",
")",
":",
"if",
"not",
"pfam_accs",
":",
"return",
"None",
"associated_pdb_chains",
"=",
"set",
"(",
")",
"pfam_api",
"=",
"self",
".",
"get_pfam_api",
"(",
")",
"for",
"pfam_acc",
"in",
"pfam_accs",
":",
"associated_pdb_chains",
"=",
"associated_pdb_chains",
".",
"union",
"(",
"pfam_api",
".",
"get_pdb_chains_from_pfam_accession_number",
"(",
"pfam_acc",
")",
")",
"hits",
"=",
"[",
"]",
"pfam_scop_mapping",
"=",
"{",
"}",
"for",
"pdb_chain_pair",
"in",
"associated_pdb_chains",
":",
"ass_pdb_id",
",",
"ass_chain_id",
"=",
"pdb_chain_pair",
"[",
"0",
"]",
",",
"pdb_chain_pair",
"[",
"1",
"]",
"hit",
"=",
"self",
".",
"get_chain_details",
"(",
"ass_pdb_id",
",",
"chain",
"=",
"ass_chain_id",
",",
"internal_function_call",
"=",
"True",
",",
"pfam_scop_mapping",
"=",
"pfam_scop_mapping",
")",
"if",
"hit",
"and",
"hit",
".",
"get",
"(",
"'chains'",
")",
":",
"assert",
"(",
"len",
"(",
"hit",
"[",
"'chains'",
"]",
")",
"==",
"1",
")",
"hits",
".",
"append",
"(",
"hit",
"[",
"'chains'",
"]",
"[",
"ass_chain_id",
"]",
")",
"allowed_scop_domains",
"=",
"map",
"(",
"int",
",",
"map",
"(",
"set",
".",
"intersection",
",",
"pfam_scop_mapping",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"allowed_scop_domains",
"=",
"list",
"(",
"set",
"(",
"(",
"allowed_scop_domains",
"or",
"[",
"]",
")",
"+",
"(",
"self",
".",
"get_sunid_for_pfam_accs",
"(",
"pfam_accs",
")",
"or",
"[",
"]",
")",
")",
")",
"filtered_hits",
"=",
"[",
"]",
"print",
"(",
"pfam_accs",
")",
"print",
"(",
"allowed_scop_domains",
")",
"print",
"(",
"'%d hits'",
"%",
"len",
"(",
"hits",
")",
")",
"for",
"hit",
"in",
"hits",
":",
"domains_to_ignore",
"=",
"[",
"]",
"for",
"k",
",",
"v",
"in",
"hit",
"[",
"'domains'",
"]",
".",
"iteritems",
"(",
")",
":",
"if",
"v",
"[",
"'sunid'",
"]",
"in",
"allowed_scop_domains",
":",
"filtered_hits",
".",
"append",
"(",
"v",
")",
"print",
"(",
"'%d filtered_hits'",
"%",
"len",
"(",
"filtered_hits",
")",
")",
"if",
"not",
"filtered_hits",
":",
"return",
"None",
"d",
"=",
"self",
".",
"get_basic_pdb_chain_information",
"(",
"pdb_id",
",",
"chain_id",
")",
"d",
".",
"update",
"(",
"self",
".",
"get_common_fields",
"(",
"filtered_hits",
")",
")",
"d",
".",
"update",
"(",
"dict",
"(",
"SCOPe_sources",
"=",
"'Pfam + SCOPe'",
",",
"SCOPe_search_fields",
"=",
"'Pfam + link_pdb.pdb_chain_id'",
",",
"SCOPe_trust_level",
"=",
"3",
")",
")",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"self",
".",
"levels",
".",
"iteritems",
"(",
")",
")",
":",
"d",
"[",
"v",
"]",
"=",
"None",
"d",
".",
"update",
"(",
"dict",
"(",
"self",
".",
"get_common_hierarchy",
"(",
"filtered_hits",
")",
")",
")",
"return",
"d"
] | python | Returns a dict of SCOPe details using info
This returns Pfam-level information for a PDB chain i.e. no details on the protein, species, or domain will be returned.
If there are SCOPe entries for the associated Pfam accession numbers which agree then this function returns
pretty complete information. | false |
1,847,562 | def email_url_config(cls, url, backend=None):
"""Parses an email URL."""
config = {}
url = urlparse(url) if not isinstance(url, cls.URL_CLASS) else url
# Remove query strings
path = url.path[1:]
path = unquote_plus(path.split('?', 2)[0])
# Update with environment configuration
config.update({
'EMAIL_FILE_PATH': path,
'EMAIL_HOST_USER': _cast_urlstr(url.username),
'EMAIL_HOST_PASSWORD': _cast_urlstr(url.password),
'EMAIL_HOST': url.hostname,
'EMAIL_PORT': _cast_int(url.port),
})
if backend:
config['EMAIL_BACKEND'] = backend
elif url.scheme not in cls.EMAIL_SCHEMES:
raise ImproperlyConfigured('Invalid email schema %s' % url.scheme)
elif url.scheme in cls.EMAIL_SCHEMES:
config['EMAIL_BACKEND'] = cls.EMAIL_SCHEMES[url.scheme]
if url.scheme in ('smtps', 'smtp+tls'):
config['EMAIL_USE_TLS'] = True
elif url.scheme == 'smtp+ssl':
config['EMAIL_USE_SSL'] = True
if url.query:
config_options = {}
for k, v in parse_qs(url.query).items():
opt = {k.upper(): _cast_int(v[0])}
if k.upper() in cls._EMAIL_BASE_OPTIONS:
config.update(opt)
else:
config_options.update(opt)
config['OPTIONS'] = config_options
return config | [
"def",
"email_url_config",
"(",
"cls",
",",
"url",
",",
"backend",
"=",
"None",
")",
":",
"config",
"=",
"{",
"}",
"url",
"=",
"urlparse",
"(",
"url",
")",
"if",
"not",
"isinstance",
"(",
"url",
",",
"cls",
".",
"URL_CLASS",
")",
"else",
"url",
"path",
"=",
"url",
".",
"path",
"[",
"1",
":",
"]",
"path",
"=",
"unquote_plus",
"(",
"path",
".",
"split",
"(",
"'?'",
",",
"2",
")",
"[",
"0",
"]",
")",
"config",
".",
"update",
"(",
"{",
"'EMAIL_FILE_PATH'",
":",
"path",
",",
"'EMAIL_HOST_USER'",
":",
"_cast_urlstr",
"(",
"url",
".",
"username",
")",
",",
"'EMAIL_HOST_PASSWORD'",
":",
"_cast_urlstr",
"(",
"url",
".",
"password",
")",
",",
"'EMAIL_HOST'",
":",
"url",
".",
"hostname",
",",
"'EMAIL_PORT'",
":",
"_cast_int",
"(",
"url",
".",
"port",
")",
",",
"}",
")",
"if",
"backend",
":",
"config",
"[",
"'EMAIL_BACKEND'",
"]",
"=",
"backend",
"elif",
"url",
".",
"scheme",
"not",
"in",
"cls",
".",
"EMAIL_SCHEMES",
":",
"raise",
"ImproperlyConfigured",
"(",
"'Invalid email schema %s'",
"%",
"url",
".",
"scheme",
")",
"elif",
"url",
".",
"scheme",
"in",
"cls",
".",
"EMAIL_SCHEMES",
":",
"config",
"[",
"'EMAIL_BACKEND'",
"]",
"=",
"cls",
".",
"EMAIL_SCHEMES",
"[",
"url",
".",
"scheme",
"]",
"if",
"url",
".",
"scheme",
"in",
"(",
"'smtps'",
",",
"'smtp+tls'",
")",
":",
"config",
"[",
"'EMAIL_USE_TLS'",
"]",
"=",
"True",
"elif",
"url",
".",
"scheme",
"==",
"'smtp+ssl'",
":",
"config",
"[",
"'EMAIL_USE_SSL'",
"]",
"=",
"True",
"if",
"url",
".",
"query",
":",
"config_options",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"parse_qs",
"(",
"url",
".",
"query",
")",
".",
"items",
"(",
")",
":",
"opt",
"=",
"{",
"k",
".",
"upper",
"(",
")",
":",
"_cast_int",
"(",
"v",
"[",
"0",
"]",
")",
"}",
"if",
"k",
".",
"upper",
"(",
")",
"in",
"cls",
".",
"_EMAIL_BASE_OPTIONS",
":",
"config",
".",
"update",
"(",
"opt",
")",
"else",
":",
"config_options",
".",
"update",
"(",
"opt",
")",
"config",
"[",
"'OPTIONS'",
"]",
"=",
"config_options",
"return",
"config"
] | python | Parses an email URL. | false |
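The URL anatomy the parser consumes maps onto Django's EMAIL_* settings roughly as below. This is a standalone re-derivation with urllib, not a call into the real library, and the example URL is made up:

# Illustrative re-derivation of the email-URL mapping above.
from urllib.parse import urlparse, parse_qs

url = urlparse('smtp+tls://user:secret@mail.example.com:587/?timeout=10')
config = {
    'EMAIL_HOST': url.hostname,           # 'mail.example.com'
    'EMAIL_PORT': int(url.port),          # 587
    'EMAIL_HOST_USER': url.username,      # 'user'
    'EMAIL_HOST_PASSWORD': url.password,  # 'secret'
    'EMAIL_USE_TLS': url.scheme in ('smtps', 'smtp+tls'),
    'OPTIONS': {k.upper(): int(v[0]) for k, v in parse_qs(url.query).items()},
}
print(config)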
1,792,267 | def gone_online(stream):
"""
Distributes the users online status to everyone he has dialog with
"""
while True:
packet = yield from stream.get()
session_id = packet.get('session_key')
if session_id:
user_owner = get_user_from_session(session_id)
if user_owner:
logger.debug('User ' + user_owner.username + ' gone online')
# find all connections including user_owner as opponent,
# send them a message that the user has gone online
online_opponents = list(filter(lambda x: x[1] == user_owner.username, ws_connections))
online_opponents_sockets = [ws_connections[i] for i in online_opponents]
yield from fanout_message(online_opponents_sockets,
{'type': 'gone-online', 'usernames': [user_owner.username]})
else:
pass # invalid session id
else:
pass | [
"def",
"gone_online",
"(",
"stream",
")",
":",
"while",
"True",
":",
"packet",
"=",
"yield",
"from",
"stream",
".",
"get",
"(",
")",
"session_id",
"=",
"packet",
".",
"get",
"(",
"'session_key'",
")",
"if",
"session_id",
":",
"user_owner",
"=",
"get_user_from_session",
"(",
"session_id",
")",
"if",
"user_owner",
":",
"logger",
".",
"debug",
"(",
"'User '",
"+",
"user_owner",
".",
"username",
"+",
"' gone online'",
")",
"online_opponents",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
"==",
"user_owner",
".",
"username",
",",
"ws_connections",
")",
")",
"online_opponents_sockets",
"=",
"[",
"ws_connections",
"[",
"i",
"]",
"for",
"i",
"in",
"online_opponents",
"]",
"yield",
"from",
"fanout_message",
"(",
"online_opponents_sockets",
",",
"{",
"'type'",
":",
"'gone-online'",
",",
"'usernames'",
":",
"[",
"user_owner",
".",
"username",
"]",
"}",
")",
"else",
":",
"pass",
"else",
":",
"pass"
] | python | Distributes the user's online status to everyone they have a dialog with | false |
2,204,053 | def light_3d(self, r, kwargs_list, k=None):
"""
computes 3d density at radius r
        :param r: radius in units of arcsec relative to the center of the image
        :type r: set or single 1d numpy array
"""
r = np.array(r, dtype=float)
flux = np.zeros_like(r)
for i, func in enumerate(self.func_list):
if k is None or k == i:
kwargs = {k: v for k, v in kwargs_list[i].items() if not k in ['center_x', 'center_y']}
if self.profile_type_list[i] in ['HERNQUIST', 'HERNQUIST_ELLIPSE', 'PJAFFE', 'PJAFFE_ELLIPSE',
'GAUSSIAN', 'GAUSSIAN_ELLIPSE', 'MULTI_GAUSSIAN',
'MULTI_GAUSSIAN_ELLIPSE', 'POWER_LAW']:
flux += func.light_3d(r, **kwargs)
else:
raise ValueError('Light model %s does not support a 3d light distribution!'
% self.profile_type_list[i])
return flux | [
"def",
"light_3d",
"(",
"self",
",",
"r",
",",
"kwargs_list",
",",
"k",
"=",
"None",
")",
":",
"r",
"=",
"np",
".",
"array",
"(",
"r",
",",
"dtype",
"=",
"float",
")",
"flux",
"=",
"np",
".",
"zeros_like",
"(",
"r",
")",
"for",
"i",
",",
"func",
"in",
"enumerate",
"(",
"self",
".",
"func_list",
")",
":",
"if",
"k",
"is",
"None",
"or",
"k",
"==",
"i",
":",
"kwargs",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs_list",
"[",
"i",
"]",
".",
"items",
"(",
")",
"if",
"not",
"k",
"in",
"[",
"'center_x'",
",",
"'center_y'",
"]",
"}",
"if",
"self",
".",
"profile_type_list",
"[",
"i",
"]",
"in",
"[",
"'HERNQUIST'",
",",
"'HERNQUIST_ELLIPSE'",
",",
"'PJAFFE'",
",",
"'PJAFFE_ELLIPSE'",
",",
"'GAUSSIAN'",
",",
"'GAUSSIAN_ELLIPSE'",
",",
"'MULTI_GAUSSIAN'",
",",
"'MULTI_GAUSSIAN_ELLIPSE'",
",",
"'POWER_LAW'",
"]",
":",
"flux",
"+=",
"func",
".",
"light_3d",
"(",
"r",
",",
"**",
"kwargs",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Light model %s does not support a 3d light distribution!'",
"%",
"self",
".",
"profile_type_list",
"[",
"i",
"]",
")",
"return",
"flux"
] | python | computes 3d density at radius r
:param r: radius in units of arcsec relative to the center of the image
:type r: set or single 1d numpy array | false |
1,931,523 | def load_pygame(filename, *args, **kwargs):
""" Load a TMX file, images, and return a TiledMap class
PYGAME USERS: Use me.
this utility has 'smart' tile loading. by default any tile without
transparent pixels will be loaded for quick blitting. if the tile has
transparent pixels, then it will be loaded with per-pixel alpha. this is
a per-tile, per-image check.
if a color key is specified as an argument, or in the tmx data, the
per-pixel alpha will not be used at all. if the tileset's image has colorkey
    transparency set in Tiled, the util_pygame will return images that have their
transparency already set.
TL;DR:
Don't attempt to convert() or convert_alpha() the individual tiles. It is
already done for you.
"""
kwargs['image_loader'] = pygame_image_loader
return pytmx.TiledMap(filename, *args, **kwargs) | [
"def",
"load_pygame",
"(",
"filename",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'image_loader'",
"]",
"=",
"pygame_image_loader",
"return",
"pytmx",
".",
"TiledMap",
"(",
"filename",
",",
"*",
"args",
",",
"**",
"kwargs",
")"
] | python | Load a TMX file, images, and return a TiledMap class
PYGAME USERS: Use me.
this utility has 'smart' tile loading. by default any tile without
transparent pixels will be loaded for quick blitting. if the tile has
transparent pixels, then it will be loaded with per-pixel alpha. this is
a per-tile, per-image check.
if a color key is specified as an argument, or in the tmx data, the
per-pixel alpha will not be used at all. if the tileset's image has colorkey
transparency set in Tiled, the util_pygame will return images that have their
transparency already set.
TL;DR:
Don't attempt to convert() or convert_alpha() the individual tiles. It is
already done for you. | false |
2,027,784 | def get_email_addresses(self):
"""
: returns: dict of type and email address list
:rtype: dict(str, list(str))
"""
email_dict = {}
for child in self.vcard.getChildren():
if child.name == "EMAIL":
type = helpers.list_to_string(
self._get_types_for_vcard_object(child, "internet"), ", ")
if type not in email_dict:
email_dict[type] = []
email_dict[type].append(child.value)
# sort email address lists
for email_list in email_dict.values():
email_list.sort()
return email_dict | [
"def",
"get_email_addresses",
"(",
"self",
")",
":",
"email_dict",
"=",
"{",
"}",
"for",
"child",
"in",
"self",
".",
"vcard",
".",
"getChildren",
"(",
")",
":",
"if",
"child",
".",
"name",
"==",
"\"EMAIL\"",
":",
"type",
"=",
"helpers",
".",
"list_to_string",
"(",
"self",
".",
"_get_types_for_vcard_object",
"(",
"child",
",",
"\"internet\"",
")",
",",
"\", \"",
")",
"if",
"type",
"not",
"in",
"email_dict",
":",
"email_dict",
"[",
"type",
"]",
"=",
"[",
"]",
"email_dict",
"[",
"type",
"]",
".",
"append",
"(",
"child",
".",
"value",
")",
"for",
"email_list",
"in",
"email_dict",
".",
"values",
"(",
")",
":",
"email_list",
".",
"sort",
"(",
")",
"return",
"email_dict"
] | python | : returns: dict of type and email address list
:rtype: dict(str, list(str)) | false |
2,464,000 | def __init__(self, client, resource, options=None):
self._client = client
self._resource = resource
self._options = options
self._websocket = None
self._callback = None
self._response_callbacks = []
self._close_callbacks = []
self._closing = threading.Lock()
self._closed = False
"""True if this manager has already been closed."""
self._request_counter = 0
self._request_counter_lock = threading.Lock()
# Thread created in ``.open()``
self._consumer = None | [
"def",
"__init__",
"(",
"self",
",",
"client",
",",
"resource",
",",
"options",
"=",
"None",
")",
":",
"self",
".",
"_client",
"=",
"client",
"self",
".",
"_resource",
"=",
"resource",
"self",
".",
"_options",
"=",
"options",
"self",
".",
"_websocket",
"=",
"None",
"self",
".",
"_callback",
"=",
"None",
"self",
".",
"_response_callbacks",
"=",
"[",
"]",
"self",
".",
"_close_callbacks",
"=",
"[",
"]",
"self",
".",
"_closing",
"=",
"threading",
".",
"Lock",
"(",
")",
"self",
".",
"_closed",
"=",
"False",
"self",
".",
"_request_counter",
"=",
"0",
"self",
".",
"_request_counter_lock",
"=",
"threading",
".",
"Lock",
"(",
")",
"self",
".",
"_consumer",
"=",
"None"
] | python | True if this manager has already been closed. | false |
2,194,622 | def get_groups(self, table_name):
"""
Return list of all groups for a particular data type
"""
df = self.dm[table_name]
return list(df['group'].unique()) | [
"def",
"get_groups",
"(",
"self",
",",
"table_name",
")",
":",
"df",
"=",
"self",
".",
"dm",
"[",
"table_name",
"]",
"return",
"list",
"(",
"df",
"[",
"'group'",
"]",
".",
"unique",
"(",
")",
")"
] | python | Return list of all groups for a particular data type | false |
2,460,964 | def load_config(self, *args, **kwargs):
"""Load a config based on the arguments passed in.
The order of arguments passed in as \*args is significant. It indicates
the order of precedence used to load configuration values. Each
argument can be a string, dictionary or a tuple. There is a special
case string called 'ENVIRONMENT', otherwise it will attempt to load the
filename passed in as a string.
By default, if a string is provided, it will attempt to load the
file based on the file_type passed in on initialization. If you
want to load a mixture of json and yaml files, you can specify them
as the 3rd part of a tuple.
Examples:
You can load configurations in any of the following ways:
>>> my_spec = YapconfSpec({'foo': {'type': 'str'}})
>>> my_spec.load_config('/path/to/file')
>>> my_spec.load_config({'foo': 'bar'})
>>> my_spec.load_config('ENVIRONMENT')
>>> my_spec.load_config(('label', {'foo': 'bar'}))
>>> my_spec.load_config(('label', '/path/to/file.yaml', 'yaml'))
>>> my_spec.load_config(('label', '/path/to/file.json', 'json'))
You can of course combine each of these and the order will be
held correctly.
Args:
*args:
**kwargs: The only supported keyword argument is 'bootstrap'
which will indicate that only bootstrap configurations
should be loaded.
Returns:
box.Box: A Box object which is subclassed from dict. It should
behave exactly as a dictionary. This object is guaranteed to
contain at least all of your required configuration items.
Raises:
YapconfLoadError: If we attempt to load your args and something
goes wrong.
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised.
"""
bootstrap = kwargs.get('bootstrap', False)
overrides = self._generate_overrides(*args)
config = self._generate_config_from_overrides(overrides, bootstrap)
return Box(config) | [
"def",
"load_config",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"bootstrap",
"=",
"kwargs",
".",
"get",
"(",
"'bootstrap'",
",",
"False",
")",
"overrides",
"=",
"self",
".",
"_generate_overrides",
"(",
"*",
"args",
")",
"config",
"=",
"self",
".",
"_generate_config_from_overrides",
"(",
"overrides",
",",
"bootstrap",
")",
"return",
"Box",
"(",
"config",
")"
] | python | Load a config based on the arguments passed in.
The order of arguments passed in as \*args is significant. It indicates
the order of precedence used to load configuration values. Each
argument can be a string, dictionary or a tuple. There is a special
case string called 'ENVIRONMENT', otherwise it will attempt to load the
filename passed in as a string.
By default, if a string is provided, it will attempt to load the
file based on the file_type passed in on initialization. If you
want to load a mixture of json and yaml files, you can specify them
as the 3rd part of a tuple.
Examples:
You can load configurations in any of the following ways:
>>> my_spec = YapconfSpec({'foo': {'type': 'str'}})
>>> my_spec.load_config('/path/to/file')
>>> my_spec.load_config({'foo': 'bar'})
>>> my_spec.load_config('ENVIRONMENT')
>>> my_spec.load_config(('label', {'foo': 'bar'}))
>>> my_spec.load_config(('label', '/path/to/file.yaml', 'yaml'))
>>> my_spec.load_config(('label', '/path/to/file.json', 'json'))
You can of course combine each of these and the order will be
held correctly.
Args:
*args:
**kwargs: The only supported keyword argument is 'bootstrap'
which will indicate that only bootstrap configurations
should be loaded.
Returns:
box.Box: A Box object which is subclassed from dict. It should
behave exactly as a dictionary. This object is guaranteed to
contain at least all of your required configuration items.
Raises:
YapconfLoadError: If we attempt to load your args and something
goes wrong.
YapconfItemNotFound: If an item is required but could not be found
in the configuration.
YapconfItemError: If a possible value was found but the type
cannot be determined.
YapconfValueError: If a possible value is found but during
conversion, an exception was raised. | false |
1,958,678 | def Open(self, file_object):
"""Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported.
"""
file_object.seek(0, os.SEEK_SET)
signature_data = file_object.read(6)
self.file_format = None
if len(signature_data) > 2:
if signature_data[:2] == self._CPIO_SIGNATURE_BINARY_BIG_ENDIAN:
self.file_format = 'bin-big-endian'
elif signature_data[:2] == self._CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN:
self.file_format = 'bin-little-endian'
elif signature_data == self._CPIO_SIGNATURE_PORTABLE_ASCII:
self.file_format = 'odc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII:
self.file_format = 'newc'
elif signature_data == self._CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM:
self.file_format = 'crc'
if self.file_format is None:
raise IOError('Unsupported CPIO format.')
self._file_object = file_object
self._file_size = file_object.get_size()
self._ReadFileEntries(self._file_object) | [
"def",
"Open",
"(",
"self",
",",
"file_object",
")",
":",
"file_object",
".",
"seek",
"(",
"0",
",",
"os",
".",
"SEEK_SET",
")",
"signature_data",
"=",
"file_object",
".",
"read",
"(",
"6",
")",
"self",
".",
"file_format",
"=",
"None",
"if",
"len",
"(",
"signature_data",
")",
">",
"2",
":",
"if",
"signature_data",
"[",
":",
"2",
"]",
"==",
"self",
".",
"_CPIO_SIGNATURE_BINARY_BIG_ENDIAN",
":",
"self",
".",
"file_format",
"=",
"'bin-big-endian'",
"elif",
"signature_data",
"[",
":",
"2",
"]",
"==",
"self",
".",
"_CPIO_SIGNATURE_BINARY_LITTLE_ENDIAN",
":",
"self",
".",
"file_format",
"=",
"'bin-little-endian'",
"elif",
"signature_data",
"==",
"self",
".",
"_CPIO_SIGNATURE_PORTABLE_ASCII",
":",
"self",
".",
"file_format",
"=",
"'odc'",
"elif",
"signature_data",
"==",
"self",
".",
"_CPIO_SIGNATURE_NEW_ASCII",
":",
"self",
".",
"file_format",
"=",
"'newc'",
"elif",
"signature_data",
"==",
"self",
".",
"_CPIO_SIGNATURE_NEW_ASCII_WITH_CHECKSUM",
":",
"self",
".",
"file_format",
"=",
"'crc'",
"if",
"self",
".",
"file_format",
"is",
"None",
":",
"raise",
"IOError",
"(",
"'Unsupported CPIO format.'",
")",
"self",
".",
"_file_object",
"=",
"file_object",
"self",
".",
"_file_size",
"=",
"file_object",
".",
"get_size",
"(",
")",
"self",
".",
"_ReadFileEntries",
"(",
"self",
".",
"_file_object",
")"
] | python | Opens the CPIO archive file.
Args:
file_object (FileIO): a file-like object.
Raises:
IOError: if the file format signature is not supported.
OSError: if the file format signature is not supported. | false |
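The Open() record above is driven entirely by the 6-byte signature at the start of the archive. Below is a minimal standalone sketch of the same detection logic; the magic byte values are assumptions taken from the cpio format documentation, since the _CPIO_SIGNATURE_* class constants are not part of this record.

    import io
    import os

    # Assumed CPIO magic values (not shown in the record itself):
    SIG_BIN_BIG_ENDIAN = b'\x71\xc7'
    SIG_BIN_LITTLE_ENDIAN = b'\xc7\x71'
    SIG_PORTABLE_ASCII = b'070707'           # 'odc'
    SIG_NEW_ASCII = b'070701'                # 'newc'
    SIG_NEW_ASCII_WITH_CHECKSUM = b'070702'  # 'crc'

    def detect_cpio_format(file_object):
        # Mirror Open(): read 6 bytes, try the 2-byte binary magics
        # first, then the 6-byte ASCII magics.
        file_object.seek(0, os.SEEK_SET)
        signature = file_object.read(6)
        if len(signature) > 2:
            if signature[:2] == SIG_BIN_BIG_ENDIAN:
                return 'bin-big-endian'
            if signature[:2] == SIG_BIN_LITTLE_ENDIAN:
                return 'bin-little-endian'
            if signature == SIG_PORTABLE_ASCII:
                return 'odc'
            if signature == SIG_NEW_ASCII:
                return 'newc'
            if signature == SIG_NEW_ASCII_WITH_CHECKSUM:
                return 'crc'
        raise IOError('Unsupported CPIO format.')

    detect_cpio_format(io.BytesIO(b'070701' + b'0' * 100))  # -> 'newc'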
1,986,689 | def _compute_site_response_term(self, C, sites, pga1000):
"""
Compute and return site response model term
This GMPE adopts the same site response scaling model of
Walling et al (2008) as implemented in the Abrahamson & Silva (2008)
GMPE. The functional form is retained here.
"""
vs_star = sites.vs30.copy()
vs_star[vs_star > 1000.0] = 1000.
arg = vs_star / C["vlin"]
site_resp_term = C["theta12"] * np.log(arg)
# Get linear scaling term
idx = sites.vs30 >= C["vlin"]
site_resp_term[idx] += (C["b"] * self.CONSTS["n"] * np.log(arg[idx]))
# Get nonlinear scaling term
idx = np.logical_not(idx)
site_resp_term[idx] += (
-C["b"] * np.log(pga1000[idx] + self.CONSTS["c"]) +
C["b"] * np.log(pga1000[idx] + self.CONSTS["c"] *
(arg[idx] ** self.CONSTS["n"])))
return site_resp_term | [
"def",
"_compute_site_response_term",
"(",
"self",
",",
"C",
",",
"sites",
",",
"pga1000",
")",
":",
"vs_star",
"=",
"sites",
".",
"vs30",
".",
"copy",
"(",
")",
"vs_star",
"[",
"vs_star",
">",
"1000.0",
"]",
"=",
"1000.",
"arg",
"=",
"vs_star",
"/",
"C",
"[",
"\"vlin\"",
"]",
"site_resp_term",
"=",
"C",
"[",
"\"theta12\"",
"]",
"*",
"np",
".",
"log",
"(",
"arg",
")",
"idx",
"=",
"sites",
".",
"vs30",
">=",
"C",
"[",
"\"vlin\"",
"]",
"site_resp_term",
"[",
"idx",
"]",
"+=",
"(",
"C",
"[",
"\"b\"",
"]",
"*",
"self",
".",
"CONSTS",
"[",
"\"n\"",
"]",
"*",
"np",
".",
"log",
"(",
"arg",
"[",
"idx",
"]",
")",
")",
"idx",
"=",
"np",
".",
"logical_not",
"(",
"idx",
")",
"site_resp_term",
"[",
"idx",
"]",
"+=",
"(",
"-",
"C",
"[",
"\"b\"",
"]",
"*",
"np",
".",
"log",
"(",
"pga1000",
"[",
"idx",
"]",
"+",
"self",
".",
"CONSTS",
"[",
"\"c\"",
"]",
")",
"+",
"C",
"[",
"\"b\"",
"]",
"*",
"np",
".",
"log",
"(",
"pga1000",
"[",
"idx",
"]",
"+",
"self",
".",
"CONSTS",
"[",
"\"c\"",
"]",
"*",
"(",
"arg",
"[",
"idx",
"]",
"**",
"self",
".",
"CONSTS",
"[",
"\"n\"",
"]",
")",
")",
")",
"return",
"site_resp_term"
] | python | Compute and return site response model term
This GMPE adopts the same site response scaling model of
Walling et al (2008) as implemented in the Abrahamson & Silva (2008)
GMPE. The functional form is retained here. | false |
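The piecewise linear/nonlinear split in the snippet above is easier to follow in isolation. A minimal numpy sketch follows; the coefficient values and the n/c defaults are placeholders for illustration, not the GMPE's published table.

    import numpy as np

    def site_response_term(vs30, pga1000, C, n=1.18, c=1.88):
        # Cap Vs30 at 1000 m/s, as in the snippet's vs_star.
        vs_star = np.minimum(vs30, 1000.0)
        arg = vs_star / C['vlin']
        term = C['theta12'] * np.log(arg)
        stiff = vs30 >= C['vlin']      # linear-scaling branch
        term[stiff] += C['b'] * n * np.log(arg[stiff])
        soft = ~stiff                  # nonlinear, amplitude-dependent branch
        term[soft] += (-C['b'] * np.log(pga1000[soft] + c) +
                       C['b'] * np.log(pga1000[soft] + c * arg[soft] ** n))
        return term

    # Placeholder coefficients, purely illustrative:
    C = {'vlin': 865.1, 'theta12': 0.98, 'b': -1.186}
    print(site_response_term(np.array([300.0, 1200.0]),
                             np.array([0.2, 0.2]), C))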
1,862,590 | def _validate(self, args, kwargs):
"""Validate option registration arguments."""
def error(exception_type, arg_name=None, **msg_kwargs):
if arg_name is None:
arg_name = args[0] if args else '<unknown>'
raise exception_type(self.scope, arg_name, **msg_kwargs)
if not args:
error(NoOptionNames)
# validate args.
for arg in args:
if not arg.startswith('-'):
error(OptionNameDash, arg_name=arg)
if not arg.startswith('--') and len(arg) > 2:
error(OptionNameDoubleDash, arg_name=arg)
# Validate kwargs.
if 'implicit_value' in kwargs and kwargs['implicit_value'] is None:
error(ImplicitValIsNone)
# Note: we check for list here, not list_option, because we validate the provided kwargs,
# not the ones we modified. However we temporarily also allow list_option, until the
# deprecation is complete.
if 'member_type' in kwargs and kwargs.get('type', str) not in [list, list_option]:
error(MemberTypeNotAllowed, type_=kwargs.get('type', str).__name__)
if kwargs.get('member_type', str) not in self._allowed_member_types:
error(InvalidMemberType, member_type=kwargs.get('member_type', str).__name__)
for kwarg in kwargs:
if kwarg not in self._allowed_registration_kwargs:
error(InvalidKwarg, kwarg=kwarg)
# Ensure `daemon=True` can't be passed on non-global scopes (except for `recursive=True`).
if (kwarg == 'daemon' and self._scope != GLOBAL_SCOPE and kwargs.get('recursive') is False):
error(InvalidKwargNonGlobalScope, kwarg=kwarg)
removal_version = kwargs.get('removal_version')
if removal_version is not None:
validate_deprecation_semver(removal_version, 'removal version') | [
"def",
"_validate",
"(",
"self",
",",
"args",
",",
"kwargs",
")",
":",
"def",
"error",
"(",
"exception_type",
",",
"arg_name",
"=",
"None",
",",
"**",
"msg_kwargs",
")",
":",
"if",
"arg_name",
"is",
"None",
":",
"arg_name",
"=",
"args",
"[",
"0",
"]",
"if",
"args",
"else",
"'<unknown>'",
"raise",
"exception_type",
"(",
"self",
".",
"scope",
",",
"arg_name",
",",
"**",
"msg_kwargs",
")",
"if",
"not",
"args",
":",
"error",
"(",
"NoOptionNames",
")",
"for",
"arg",
"in",
"args",
":",
"if",
"not",
"arg",
".",
"startswith",
"(",
"'-'",
")",
":",
"error",
"(",
"OptionNameDash",
",",
"arg_name",
"=",
"arg",
")",
"if",
"not",
"arg",
".",
"startswith",
"(",
"'--'",
")",
"and",
"len",
"(",
"arg",
")",
">",
"2",
":",
"error",
"(",
"OptionNameDoubleDash",
",",
"arg_name",
"=",
"arg",
")",
"if",
"'implicit_value'",
"in",
"kwargs",
"and",
"kwargs",
"[",
"'implicit_value'",
"]",
"is",
"None",
":",
"error",
"(",
"ImplicitValIsNone",
")",
"if",
"'member_type'",
"in",
"kwargs",
"and",
"kwargs",
".",
"get",
"(",
"'type'",
",",
"str",
")",
"not",
"in",
"[",
"list",
",",
"list_option",
"]",
":",
"error",
"(",
"MemberTypeNotAllowed",
",",
"type_",
"=",
"kwargs",
".",
"get",
"(",
"'type'",
",",
"str",
")",
".",
"__name__",
")",
"if",
"kwargs",
".",
"get",
"(",
"'member_type'",
",",
"str",
")",
"not",
"in",
"self",
".",
"_allowed_member_types",
":",
"error",
"(",
"InvalidMemberType",
",",
"member_type",
"=",
"kwargs",
".",
"get",
"(",
"'member_type'",
",",
"str",
")",
".",
"__name__",
")",
"for",
"kwarg",
"in",
"kwargs",
":",
"if",
"kwarg",
"not",
"in",
"self",
".",
"_allowed_registration_kwargs",
":",
"error",
"(",
"InvalidKwarg",
",",
"kwarg",
"=",
"kwarg",
")",
"if",
"(",
"kwarg",
"==",
"'daemon'",
"and",
"self",
".",
"_scope",
"!=",
"GLOBAL_SCOPE",
"and",
"kwargs",
".",
"get",
"(",
"'recursive'",
")",
"is",
"False",
")",
":",
"error",
"(",
"InvalidKwargNonGlobalScope",
",",
"kwarg",
"=",
"kwarg",
")",
"removal_version",
"=",
"kwargs",
".",
"get",
"(",
"'removal_version'",
")",
"if",
"removal_version",
"is",
"not",
"None",
":",
"validate_deprecation_semver",
"(",
"removal_version",
",",
"'removal version'",
")"
] | python | Validate option registration arguments. | false |
2,457,531 | def d3flare_json(metadata, file=None, **options):
""" Converts the *metadata* dictionary of a container or field into a
``flare.json`` formatted string or formatted stream written to the *file*.
The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic
library.
The ``flare.json`` format looks like this:
.. code-block:: JSON
{
"class": "class of the field or container",
"name": "name of the field or container",
"size": "bit size of the field",
"value": "value of the field",
"children": []
}
:param dict metadata: metadata generated from a :class:`Structure`,
:class:`Sequence`, :class:`Array` or any :class:`Field` instance.
:param file file: file-like object.
"""
def convert(root):
dct = OrderedDict()
item_type = root.get('type')
dct['class'] = root.get('class')
dct['name'] = root.get('name')
if item_type is ItemClass.Field.name:
dct['size'] = root.get('size')
dct['value'] = root.get('value')
children = root.get('member')
if children:
# Any containable class with children
dct['children'] = list()
if item_type is ItemClass.Pointer.name:
# Create pointer address field as child
field = OrderedDict()
field['class'] = dct['class']
field['name'] = '*' + dct['name']
field['size'] = root.get('size')
field['value'] = root.get('value')
dct['children'].append(field)
for child in map(convert, children):
# Recursive function call map(fnc, args).
dct['children'].append(child)
elif item_type is ItemClass.Pointer.name:
# Null pointer (None pointer)
dct['size'] = root.get('size')
dct['value'] = root.get('value')
return dct
options['indent'] = options.get('indent', 2)
if file:
return json.dump(convert(metadata), file, **options)
else:
return json.dumps(convert(metadata), **options) | [
"def",
"d3flare_json",
"(",
"metadata",
",",
"file",
"=",
"None",
",",
"**",
"options",
")",
":",
"def",
"convert",
"(",
"root",
")",
":",
"dct",
"=",
"OrderedDict",
"(",
")",
"item_type",
"=",
"root",
".",
"get",
"(",
"'type'",
")",
"dct",
"[",
"'class'",
"]",
"=",
"root",
".",
"get",
"(",
"'class'",
")",
"dct",
"[",
"'name'",
"]",
"=",
"root",
".",
"get",
"(",
"'name'",
")",
"if",
"item_type",
"is",
"ItemClass",
".",
"Field",
".",
"name",
":",
"dct",
"[",
"'size'",
"]",
"=",
"root",
".",
"get",
"(",
"'size'",
")",
"dct",
"[",
"'value'",
"]",
"=",
"root",
".",
"get",
"(",
"'value'",
")",
"children",
"=",
"root",
".",
"get",
"(",
"'member'",
")",
"if",
"children",
":",
"dct",
"[",
"'children'",
"]",
"=",
"list",
"(",
")",
"if",
"item_type",
"is",
"ItemClass",
".",
"Pointer",
".",
"name",
":",
"field",
"=",
"OrderedDict",
"(",
")",
"field",
"[",
"'class'",
"]",
"=",
"dct",
"[",
"'class'",
"]",
"field",
"[",
"'name'",
"]",
"=",
"'*'",
"+",
"dct",
"[",
"'name'",
"]",
"field",
"[",
"'size'",
"]",
"=",
"root",
".",
"get",
"(",
"'size'",
")",
"field",
"[",
"'value'",
"]",
"=",
"root",
".",
"get",
"(",
"'value'",
")",
"dct",
"[",
"'children'",
"]",
".",
"append",
"(",
"field",
")",
"for",
"child",
"in",
"map",
"(",
"convert",
",",
"children",
")",
":",
"dct",
"[",
"'children'",
"]",
".",
"append",
"(",
"child",
")",
"elif",
"item_type",
"is",
"ItemClass",
".",
"Pointer",
".",
"name",
":",
"dct",
"[",
"'size'",
"]",
"=",
"root",
".",
"get",
"(",
"'size'",
")",
"dct",
"[",
"'value'",
"]",
"=",
"root",
".",
"get",
"(",
"'value'",
")",
"return",
"dct",
"options",
"[",
"'indent'",
"]",
"=",
"options",
".",
"get",
"(",
"'indent'",
",",
"2",
")",
"if",
"file",
":",
"return",
"json",
".",
"dump",
"(",
"convert",
"(",
"metadata",
")",
",",
"file",
",",
"**",
"options",
")",
"else",
":",
"return",
"json",
".",
"dumps",
"(",
"convert",
"(",
"metadata",
")",
",",
"**",
"options",
")"
] | python | Converts the *metadata* dictionary of a container or field into a
``flare.json`` formatted string or formatted stream written to the *file*.
The ``flare.json`` format is defined by the `d3.js <https://d3js.org/>`_ graphic
library.
The ``flare.json`` format looks like this:
.. code-block:: JSON
{
"class": "class of the field or container",
"name": "name of the field or container",
"size": "bit size of the field",
"value": "value of the field",
"children": []
}
:param dict metadata: metadata generated from a :class:`Structure`,
:class:`Sequence`, :class:`Array` or any :class:`Field` instance.
:param file file: file-like object. | false |
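Usage of the record above with a hypothetical metadata dict shaped like what a field's describe() would return (the class and field names are invented). Note that convert() compares type names with ``is``, so it relies on the ItemClass member name strings being interned.

    # Hypothetical single-field metadata:
    metadata = {'class': 'Decimal32', 'name': 'length',
                'type': 'Field', 'size': 32, 'value': 4}

    print(d3flare_json(metadata))          # 2-space indented JSON string
    with open('flare.json', 'w') as stream:
        d3flare_json(metadata, stream)     # or stream it to a file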
2,509,811 | def t_name(self, s):
r'[A-Za-z_][A-Za-z_0-9]*'
if s in RESERVED_WORDS:
self.add_token(s.upper(), s)
else:
self.add_token('NAME', s) | [
"def",
"t_name",
"(",
"self",
",",
"s",
")",
":",
"if",
"s",
"in",
"RESERVED_WORDS",
":",
"self",
".",
"add_token",
"(",
"s",
".",
"upper",
"(",
")",
",",
"s",
")",
"else",
":",
"self",
".",
"add_token",
"(",
"'NAME'",
",",
"s",
")"
] | python | r'[A-Za-z_][A-Za-z_0-9]*' | false |
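The rule above is the classic keyword-vs-identifier split in a hand-written lexer: match the identifier pattern once, then promote reserved words to their own token type. A standalone sketch, with an invented reserved-word set:

    RESERVED_WORDS = {'if', 'else', 'while'}   # hypothetical reserved set

    def classify_name(text):
        # Keywords become their own upper-cased token type; everything
        # else that matches the identifier regex is a plain NAME.
        return (text.upper() if text in RESERVED_WORDS else 'NAME', text)

    classify_name('while')   # -> ('WHILE', 'while')
    classify_name('count')   # -> ('NAME', 'count')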
2,612,573 | def make_error_redirect(self, authorization_error=None):
""" Return a Django ``HttpResponseRedirect`` describing the request failure.
If the :py:meth:`validate` method raises an error, the authorization
endpoint should return the result of calling this method like so:
>>> auth_code_generator = (
>>> AuthorizationCodeGenerator('/oauth2/missing_redirect_uri/'))
>>> try:
>>> auth_code_generator.validate(request)
>>> except AuthorizationError as authorization_error:
>>> return auth_code_generator.make_error_redirect(authorization_error)
If there is no known Client ``redirect_uri`` (because it is malformed, or
the Client is invalid, or if the supplied ``redirect_uri`` does not match
the regsitered value, or some other request failure) then the response will
redirect to the ``missing_redirect_uri`` passed to the :py:meth:`__init__`
method.
Also used to signify user denial; call this method without passing in the
optional ``authorization_error`` argument to return a generic
:py:class:`AccessDenied` message.
>>> if not user_accepted_request:
>>> return auth_code_generator.make_error_redirect()
"""
if not self.redirect_uri:
return HttpResponseRedirect(self.missing_redirect_uri)
authorization_error = (authorization_error or
AccessDenied('user denied the request'))
response_params = get_error_details(authorization_error)
# From http://tools.ietf.org/html/rfc6749#section-4.1.2.1 :
#
# REQUIRED if the "state" parameter was present in the client
# authorization request. The exact value received from the
# client.
#
if self.state is not None:
response_params['state'] = self.state
return HttpResponseRedirect(
update_parameters(self.redirect_uri, response_params)) | [
"def",
"make_error_redirect",
"(",
"self",
",",
"authorization_error",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"redirect_uri",
":",
"return",
"HttpResponseRedirect",
"(",
"self",
".",
"missing_redirect_uri",
")",
"authorization_error",
"=",
"(",
"authorization_error",
"or",
"AccessDenied",
"(",
"'user denied the request'",
")",
")",
"response_params",
"=",
"get_error_details",
"(",
"authorization_error",
")",
"if",
"self",
".",
"state",
"is",
"not",
"None",
":",
"response_params",
"[",
"'state'",
"]",
"=",
"self",
".",
"state",
"return",
"HttpResponseRedirect",
"(",
"update_parameters",
"(",
"self",
".",
"redirect_uri",
",",
"response_params",
")",
")"
] | python | Return a Django ``HttpResponseRedirect`` describing the request failure.
If the :py:meth:`validate` method raises an error, the authorization
endpoint should return the result of calling this method like so:
>>> auth_code_generator = (
>>> AuthorizationCodeGenerator('/oauth2/missing_redirect_uri/'))
>>> try:
>>> auth_code_generator.validate(request)
>>> except AuthorizationError as authorization_error:
>>> return auth_code_generator.make_error_redirect(authorization_error)
If there is no known Client ``redirect_uri`` (because it is malformed, or
the Client is invalid, or if the supplied ``redirect_uri`` does not match
the registered value, or some other request failure) then the response will
redirect to the ``missing_redirect_uri`` passed to the :py:meth:`__init__`
method.
Also used to signify user denial; call this method without passing in the
optional ``authorization_error`` argument to return a generic
:py:class:`AccessDenied` message.
>>> if not user_accepted_request:
>>> return auth_code_generator.make_error_redirect() | false |
1,752,851 | def parse(argv, rules=None, config=None, **kwargs):
"""Parse the given arg vector with the default Splunk command rules."""
parser_ = parser(rules, **kwargs)
if config is not None: parser_.loadrc(config)
return parser_.parse(argv).result | [
"def",
"parse",
"(",
"argv",
",",
"rules",
"=",
"None",
",",
"config",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"parser_",
"=",
"parser",
"(",
"rules",
",",
"**",
"kwargs",
")",
"if",
"config",
"is",
"not",
"None",
":",
"parser_",
".",
"loadrc",
"(",
"config",
")",
"return",
"parser_",
".",
"parse",
"(",
"argv",
")",
".",
"result"
] | python | Parse the given arg vector with the default Splunk command rules. | false |
2,015,573 | def validate_sort_fields(self):
""" Take care of sorting.
"""
sort_fields = ','.join(self.options.sort_fields)
if sort_fields == '*':
sort_fields = self.get_output_fields()
return formatting.validate_sort_fields(sort_fields or config.sort_fields) | [
"def",
"validate_sort_fields",
"(",
"self",
")",
":",
"sort_fields",
"=",
"','",
".",
"join",
"(",
"self",
".",
"options",
".",
"sort_fields",
")",
"if",
"sort_fields",
"==",
"'*'",
":",
"sort_fields",
"=",
"self",
".",
"get_output_fields",
"(",
")",
"return",
"formatting",
".",
"validate_sort_fields",
"(",
"sort_fields",
"or",
"config",
".",
"sort_fields",
")"
] | python | Take care of sorting. | false |
2,198,117 | def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if os.path.isfile(name): return name
for spath in lookup:
fname = os.path.join(spath, name)
if os.path.isfile(fname):
return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext) | [
"def",
"search",
"(",
"cls",
",",
"name",
",",
"lookup",
"=",
"[",
"]",
")",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"name",
")",
":",
"return",
"name",
"for",
"spath",
"in",
"lookup",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"spath",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"fname",
")",
":",
"return",
"fname",
"for",
"ext",
"in",
"cls",
".",
"extensions",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"'%s.%s'",
"%",
"(",
"fname",
",",
"ext",
")",
")",
":",
"return",
"'%s.%s'",
"%",
"(",
"fname",
",",
"ext",
")"
] | python | Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. | false |
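The probing order of search() is: the bare name first, then each lookup directory, then each extension inside that directory. A module-level restatement, with the extension list ('tpl', 'html') invented for illustration:

    import os

    def search(name, lookup=(), extensions=('tpl', 'html')):
        # Same probing order as the classmethod above, minus the class.
        if os.path.isfile(name):
            return name
        for spath in lookup:
            fname = os.path.join(spath, name)
            if os.path.isfile(fname):
                return fname
            for ext in extensions:
                if os.path.isfile('%s.%s' % (fname, ext)):
                    return '%s.%s' % (fname, ext)

    # With only ./views/index.html on disk:
    #   search('index', ['./views'])  ->  './views/index.html'
    # because 'index', './views/index' and './views/index.tpl' do not exist.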
2,420,961 | def init(self, projectname=None, description=None, **kwargs):
"""
Initialize a new experiment
Parameters
----------
projectname: str
The name of the project that shall be used. If None, the last one
created will be used
description: str
A short summary of the experiment
``**kwargs``
Keyword arguments passed to the :meth:`app_main` method
Notes
-----
If the experiment is None, a new experiment will be created
"""
self.app_main(**kwargs)
experiments = self.config.experiments
experiment = self._experiment
if experiment is None and not experiments:
experiment = self.name + '_exp0'
elif experiment is None:
try:
experiment = utils.get_next_name(self.experiment)
except ValueError:
raise ValueError(
"Could not estimate an experiment id! Please use the "
"experiment argument to provide an id.")
self.experiment = experiment
if self.is_archived(experiment):
raise ValueError(
"The specified experiment has already been archived! Run "
"``%s -id %s unarchive`` first" % (self.name, experiment))
if projectname is None:
projectname = self.projectname
else:
self.projectname = projectname
self.logger.info("Initializing experiment %s of project %s",
experiment, projectname)
exp_dict = experiments.setdefault(experiment, OrderedDict())
if description is not None:
exp_dict['description'] = description
exp_dict['project'] = projectname
exp_dict['expdir'] = exp_dir = osp.join('experiments', experiment)
exp_dir = osp.join(self.config.projects[projectname]['root'], exp_dir)
exp_dict['timestamps'] = OrderedDict()
if not os.path.exists(exp_dir):
self.logger.debug(" Creating experiment directory %s", exp_dir)
os.makedirs(exp_dir)
self.fix_paths(exp_dict)
return exp_dict | [
"def",
"init",
"(",
"self",
",",
"projectname",
"=",
"None",
",",
"description",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"self",
".",
"app_main",
"(",
"**",
"kwargs",
")",
"experiments",
"=",
"self",
".",
"config",
".",
"experiments",
"experiment",
"=",
"self",
".",
"_experiment",
"if",
"experiment",
"is",
"None",
"and",
"not",
"experiments",
":",
"experiment",
"=",
"self",
".",
"name",
"+",
"'_exp0'",
"elif",
"experiment",
"is",
"None",
":",
"try",
":",
"experiment",
"=",
"utils",
".",
"get_next_name",
"(",
"self",
".",
"experiment",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Could not estimate an experiment id! Please use the \"",
"\"experiment argument to provide an id.\"",
")",
"self",
".",
"experiment",
"=",
"experiment",
"if",
"self",
".",
"is_archived",
"(",
"experiment",
")",
":",
"raise",
"ValueError",
"(",
"\"The specified experiment has already been archived! Run \"",
"\"``%s -id %s unarchive`` first\"",
"%",
"(",
"self",
".",
"name",
",",
"experiment",
")",
")",
"if",
"projectname",
"is",
"None",
":",
"projectname",
"=",
"self",
".",
"projectname",
"else",
":",
"self",
".",
"projectname",
"=",
"projectname",
"self",
".",
"logger",
".",
"info",
"(",
"\"Initializing experiment %s of project %s\"",
",",
"experiment",
",",
"projectname",
")",
"exp_dict",
"=",
"experiments",
".",
"setdefault",
"(",
"experiment",
",",
"OrderedDict",
"(",
")",
")",
"if",
"description",
"is",
"not",
"None",
":",
"exp_dict",
"[",
"'description'",
"]",
"=",
"description",
"exp_dict",
"[",
"'project'",
"]",
"=",
"projectname",
"exp_dict",
"[",
"'expdir'",
"]",
"=",
"exp_dir",
"=",
"osp",
".",
"join",
"(",
"'experiments'",
",",
"experiment",
")",
"exp_dir",
"=",
"osp",
".",
"join",
"(",
"self",
".",
"config",
".",
"projects",
"[",
"projectname",
"]",
"[",
"'root'",
"]",
",",
"exp_dir",
")",
"exp_dict",
"[",
"'timestamps'",
"]",
"=",
"OrderedDict",
"(",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"exp_dir",
")",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"\" Creating experiment directory %s\"",
",",
"exp_dir",
")",
"os",
".",
"makedirs",
"(",
"exp_dir",
")",
"self",
".",
"fix_paths",
"(",
"exp_dict",
")",
"return",
"exp_dict"
] | python | Initialize a new experiment
Parameters
----------
projectname: str
The name of the project that shall be used. If None, the last one
created will be used
description: str
A short summary of the experiment
``**kwargs``
Keyword arguments passed to the :meth:`app_main` method
Notes
-----
If the experiment is None, a new experiment will be created | false |
2,708,702 | def notebook_exists(self, notebook_id):
"""Does a notebook exist?"""
if notebook_id not in self.mapping:
return False
path = self.get_path_by_name(self.mapping[notebook_id])
return os.path.isfile(path) | [
"def",
"notebook_exists",
"(",
"self",
",",
"notebook_id",
")",
":",
"if",
"notebook_id",
"not",
"in",
"self",
".",
"mapping",
":",
"return",
"False",
"path",
"=",
"self",
".",
"get_path_by_name",
"(",
"self",
".",
"mapping",
"[",
"notebook_id",
"]",
")",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")"
] | python | Does a notebook exist? | false |
1,978,352 | def to_buffer(f):
"""
Decorator converting all strings and iterators/iterables into Buffers.
"""
@functools.wraps(f)
def wrap(*args, **kwargs):
iterator = kwargs.get('iterator', args[0])
if not isinstance(iterator, Buffer):
iterator = Buffer(iterator)
return f(iterator, *args[1:], **kwargs)
return wrap | [
"def",
"to_buffer",
"(",
"f",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"f",
")",
"def",
"wrap",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"iterator",
"=",
"kwargs",
".",
"get",
"(",
"'iterator'",
",",
"args",
"[",
"0",
"]",
")",
"if",
"not",
"isinstance",
"(",
"iterator",
",",
"Buffer",
")",
":",
"iterator",
"=",
"Buffer",
"(",
"iterator",
")",
"return",
"f",
"(",
"iterator",
",",
"*",
"args",
"[",
"1",
":",
"]",
",",
"**",
"kwargs",
")",
"return",
"wrap"
] | python | Decorator converting all strings and iterators/iterables into Buffers. | false |
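How the decorator behaves at a call site, with a minimal stand-in for the real Buffer class (an assumption — only the isinstance check matters here, and the sketch pretends the stand-in shares the decorator's namespace):

    class Buffer:
        # Minimal stand-in: just enough to be iterated.
        def __init__(self, source):
            self._items = iter(source)
        def __next__(self):
            return next(self._items)

    @to_buffer
    def first_item(buf):
        # Guaranteed a Buffer regardless of what the caller passed.
        return next(buf)

    first_item('abc')           # plain string gets wrapped -> 'a'
    first_item(Buffer('abc'))   # existing Buffer passes through -> 'a'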
2,665,773 | def validate(self, meta, val):
"""Validate an account_id"""
val = string_or_int_as_string_spec().normalise(meta, val)
if not regexes['amazon_account_id'].match(val):
raise BadOption("Account id must match a particular regex", got=val, should_match=regexes['amazon_account_id'].pattern)
return val | [
"def",
"validate",
"(",
"self",
",",
"meta",
",",
"val",
")",
":",
"val",
"=",
"string_or_int_as_string_spec",
"(",
")",
".",
"normalise",
"(",
"meta",
",",
"val",
")",
"if",
"not",
"regexes",
"[",
"'amazon_account_id'",
"]",
".",
"match",
"(",
"val",
")",
":",
"raise",
"BadOption",
"(",
"\"Account id must match a particular regex\"",
",",
"got",
"=",
"val",
",",
"should_match",
"=",
"regexes",
"[",
"'amazon_account_id'",
"]",
".",
"pattern",
")",
"return",
"val"
] | python | Validate an account_id | false |
2,154,388 | def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
value = float(value)
fh = None
try:
fh = open(path,'r+b')
return file_update(fh, value, timestamp)
finally:
if fh:
fh.close() | [
"def",
"update",
"(",
"path",
",",
"value",
",",
"timestamp",
"=",
"None",
")",
":",
"value",
"=",
"float",
"(",
"value",
")",
"fh",
"=",
"None",
"try",
":",
"fh",
"=",
"open",
"(",
"path",
",",
"'r+b'",
")",
"return",
"file_update",
"(",
"fh",
",",
"value",
",",
"timestamp",
")",
"finally",
":",
"if",
"fh",
":",
"fh",
".",
"close",
"(",
")"
] | python | update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float | false |
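Calling convention for the record above. The .wsp path is hypothetical, the archive must already exist (the function opens it 'r+b'), and file_update() is the module's own helper that understands the archive layout.

    import time

    wsp = '/var/lib/carbon/whisper/app/requests.wsp'  # hypothetical path
    update(wsp, 42.0)                                 # timestamp defaults to now
    update(wsp, 41.0, timestamp=time.time() - 60)     # backfill one minute ago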
2,455,366 | def describe(self, name=str(), **options):
""" Returns the **metadata** of the `Structure` as an
:class:`ordered dictionary <collections.OrderedDict>`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Structure.item_type.name
'member': [
item.describe(member) for member, item in self.items()
]
}
:param str name: optional name for the `Structure`.
Fallback is the class name.
:keyword bool nested: if ``True`` all :class:`Pointer` fields of the
`Structure` lists their referenced :attr:`~Pointer.data` object fields
as well (chained method call). Default is ``True``.
"""
members = list()
metadata = OrderedDict()
metadata['class'] = self.__class__.__name__
metadata['name'] = name if name else self.__class__.__name__
metadata['size'] = len(self)
metadata['type'] = self.item_type.name
metadata['member'] = members
for member_name, item in self.items():
# Container
if is_container(item):
members.append(item.describe(member_name, **options))
# Pointer
elif is_pointer(item) and get_nested(options):
members.append(item.describe(member_name, **options))
# Field
elif is_field(item):
members.append(item.describe(member_name, nested=False))
else:
raise MemberTypeError(self, item, member_name)
return metadata | [
"def",
"describe",
"(",
"self",
",",
"name",
"=",
"str",
"(",
")",
",",
"**",
"options",
")",
":",
"members",
"=",
"list",
"(",
")",
"metadata",
"=",
"OrderedDict",
"(",
")",
"metadata",
"[",
"'class'",
"]",
"=",
"self",
".",
"__class__",
".",
"__name__",
"metadata",
"[",
"'name'",
"]",
"=",
"name",
"if",
"name",
"else",
"self",
".",
"__class__",
".",
"__name__",
"metadata",
"[",
"'size'",
"]",
"=",
"len",
"(",
"self",
")",
"metadata",
"[",
"'type'",
"]",
"=",
"self",
".",
"item_type",
".",
"name",
"metadata",
"[",
"'member'",
"]",
"=",
"members",
"for",
"member_name",
",",
"item",
"in",
"self",
".",
"items",
"(",
")",
":",
"if",
"is_container",
"(",
"item",
")",
":",
"members",
".",
"append",
"(",
"item",
".",
"describe",
"(",
"member_name",
",",
"**",
"options",
")",
")",
"elif",
"is_pointer",
"(",
"item",
")",
"and",
"get_nested",
"(",
"options",
")",
":",
"members",
".",
"append",
"(",
"item",
".",
"describe",
"(",
"member_name",
",",
"**",
"options",
")",
")",
"elif",
"is_field",
"(",
"item",
")",
":",
"members",
".",
"append",
"(",
"item",
".",
"describe",
"(",
"member_name",
",",
"nested",
"=",
"False",
")",
")",
"else",
":",
"raise",
"MemberTypeError",
"(",
"self",
",",
"item",
",",
"member_name",
")",
"return",
"metadata"
] | python | Returns the **metadata** of the `Structure` as an
:class:`ordered dictionary <collections.OrderedDict>`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Structure.item_type.name
'member': [
item.describe(member) for member, item in self.items()
]
}
:param str name: optional name for the `Structure`.
Fallback is the class name.
:keyword bool nested: if ``True`` all :class:`Pointer` fields of the
`Structure` lists their referenced :attr:`~Pointer.data` object fields
as well (chained method call). Default is ``True``. | false |
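A hedged usage sketch tying this record to the d3flare_json() record earlier in this table. The Structure subclass and the field classes are invented, assuming attribute assignment registers members as in the library this snippet comes from:

    class Header(Structure):             # hypothetical container
        def __init__(self):
            super().__init__()
            self.version = Decimal8()    # invented field classes
            self.length = Decimal32()

    metadata = Header().describe()
    metadata['class']                    # -> 'Header'
    len(metadata['member'])              # -> 2, one entry per member
    d3flare_json(metadata)               # feeds straight into the d3 export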
1,717,458 | def long_click(self, pos, duration=2.0):
"""
Similar to click but press the screen for the given time interval and then release
Args:
pos (:obj:`2-list/2-tuple`): coordinates (x, y) in range from 0 to 1
duration: duration of press the screen
"""
try:
duration = float(duration)
except ValueError:
raise ValueError('Argument `duration` should be <float>. Got {}'.format(repr(duration)))
if not (0 <= pos[0] <= 1) or not (0 <= pos[1] <= 1):
raise InvalidOperationException('Click position out of screen. {}'.format(repr(pos)))
return self.agent.input.longClick(pos[0], pos[1], duration) | [
"def",
"long_click",
"(",
"self",
",",
"pos",
",",
"duration",
"=",
"2.0",
")",
":",
"try",
":",
"duration",
"=",
"float",
"(",
"duration",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"'Argument `duration` should be <float>. Got {}'",
".",
"format",
"(",
"repr",
"(",
"duration",
")",
")",
")",
"if",
"not",
"(",
"0",
"<=",
"pos",
"[",
"0",
"]",
"<=",
"1",
")",
"or",
"not",
"(",
"0",
"<=",
"pos",
"[",
"1",
"]",
"<=",
"1",
")",
":",
"raise",
"InvalidOperationException",
"(",
"'Click position out of screen. {}'",
".",
"format",
"(",
"repr",
"(",
"pos",
")",
")",
")",
"return",
"self",
".",
"agent",
".",
"input",
".",
"longClick",
"(",
"pos",
"[",
"0",
"]",
",",
"pos",
"[",
"1",
"]",
",",
"duration",
")"
] | python | Similar to click but press the screen for the given time interval and then release
Args:
pos (:obj:`2-list/2-tuple`): coordinates (x, y) in range from 0 to 1
duration: duration of press the screen | false |
1,679,939 | def func_interpolate_na(interpolator, x, y, **kwargs):
'''helper function to apply interpolation along 1 dimension'''
# it would be nice if this wasn't necessary, works around:
# "ValueError: assignment destination is read-only" in assignment below
out = y.copy()
nans = pd.isnull(y)
nonans = ~nans
# fast track for no-nans and all-nans cases
n_nans = nans.sum()
if n_nans == 0 or n_nans == len(y):
return y
f = interpolator(x[nonans], y[nonans], **kwargs)
out[nans] = f(x[nans])
return out | [
"def",
"func_interpolate_na",
"(",
"interpolator",
",",
"x",
",",
"y",
",",
"**",
"kwargs",
")",
":",
"out",
"=",
"y",
".",
"copy",
"(",
")",
"nans",
"=",
"pd",
".",
"isnull",
"(",
"y",
")",
"nonans",
"=",
"~",
"nans",
"n_nans",
"=",
"nans",
".",
"sum",
"(",
")",
"if",
"n_nans",
"==",
"0",
"or",
"n_nans",
"==",
"len",
"(",
"y",
")",
":",
"return",
"y",
"f",
"=",
"interpolator",
"(",
"x",
"[",
"nonans",
"]",
",",
"y",
"[",
"nonans",
"]",
",",
"**",
"kwargs",
")",
"out",
"[",
"nans",
"]",
"=",
"f",
"(",
"x",
"[",
"nans",
"]",
")",
"return",
"out"
] | python | helper function to apply interpolation along 1 dimension | false |
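A worked call, using scipy's interp1d as the interpolator argument; any callable with the signature interpolator(x, y, **kwargs) -> f works the same way.

    import numpy as np
    import pandas as pd
    from scipy.interpolate import interp1d

    x = np.arange(5, dtype=float)
    y = np.array([0.0, np.nan, 2.0, np.nan, 4.0])

    filled = func_interpolate_na(interp1d, x, y, kind='linear')
    # -> array([0., 1., 2., 3., 4.]); only the NaN slots were rewritten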
2,024,444 | def report(script=None, input=None, output=None, **kwargs):
'''Write script to an output file specified by `output`, which can be
a filename to which the content of the script will be written,
or any object with a "write" attribute (e.g. a file handle) for which the "write"
function will be called with the content. If output is unspecified, the content
will be written to standard output or appended to a file specified with command
line option `-r`. '''
if env.config['run_mode'] == 'dryrun':
if '__std_out__' in env.sos_dict:
with open(env.sos_dict['__std_out__'], 'a') as so:
so.write(f'HINT: report:\n{"" if script is None else script}\n')
if input is not None:
for ifile in input:
so.write(f' from file: {ifile}\n')
else:
print(f'HINT: report:\n{"" if script is None else script}')
if input is not None:
for ifile in input:
print(f' from file: {ifile}')
return
file_handle = None
if isinstance(output, str):
if not output or output == '-':
writer = sys.stdout.write
elif output.startswith('>>'):
file_handle = open(os.path.expanduser(output[2:]), 'a')
writer = file_handle.write
else:
file_handle = open(os.path.expanduser(output), 'w')
writer = file_handle.write
elif isinstance(output, (path, file_target)):
file_handle = open(os.path.expanduser(str(output)), 'w')
writer = file_handle.write
elif isinstance(output, (paths, sos_targets)):
if len(output) != 1:
raise ValueError(f'More than one output is specified {output}')
if not isinstance(output[0], (file_target, path)):
raise ValueError(
f'Action report can only output to file target or standard output'
)
file_handle = open(os.path.expanduser(str(output[0])), 'w')
writer = file_handle.write
elif hasattr(output, 'write'):
writer = output.write
elif output is None or output == '':
writer = sys.stdout.write
else:
raise ValueError(f'Invalid output {output}.')
# file lock to prevent race condition
with TimeoutInterProcessLock(os.path.join(env.temp_dir, 'report_lock')):
if isinstance(script, str) and script.strip():
writer(script.rstrip() + '\n\n')
if input is not None:
if isinstance(input, (str, file_target)):
if 'ACTION' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']:
env.log_to_file('ACTION', f'Loading report from {input}')
with open(input) as ifile:
writer(ifile.read().rstrip() + '\n\n')
elif isinstance(input, Sequence):
for ifile in input:
try:
env.logger.debug(f'Loading report from {ifile}')
with open(ifile) as itmp:
writer(itmp.read().rstrip() + '\n\n')
except Exception as e:
raise ValueError(
f'Failed to read input file {ifile}: {e}')
else:
raise ValueError('Unknown input file for action report')
#
if file_handle:
file_handle.close() | [
"def",
"report",
"(",
"script",
"=",
"None",
",",
"input",
"=",
"None",
",",
"output",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"if",
"env",
".",
"config",
"[",
"'run_mode'",
"]",
"==",
"'dryrun'",
":",
"if",
"'__std_out__'",
"in",
"env",
".",
"sos_dict",
":",
"with",
"open",
"(",
"env",
".",
"sos_dict",
"[",
"'__std_out__'",
"]",
",",
"'a'",
")",
"as",
"so",
":",
"so",
".",
"write",
"(",
"f'HINT: report:\\n{\"\" if script is None else script}\\n'",
")",
"if",
"input",
"is",
"not",
"None",
":",
"for",
"ifile",
"in",
"input",
":",
"so",
".",
"write",
"(",
"f' from file: {ifile}\\n'",
")",
"else",
":",
"print",
"(",
"f'HINT: report:\\n{\"\" if script is None else script}'",
")",
"if",
"input",
"is",
"not",
"None",
":",
"for",
"ifile",
"in",
"input",
":",
"print",
"(",
"f' from file: {ifile}'",
")",
"return",
"file_handle",
"=",
"None",
"if",
"isinstance",
"(",
"output",
",",
"str",
")",
":",
"if",
"not",
"output",
"or",
"output",
"==",
"'-'",
":",
"writer",
"=",
"sys",
".",
"stdout",
".",
"write",
"elif",
"output",
".",
"startswith",
"(",
"'>>'",
")",
":",
"file_handle",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"output",
"[",
"2",
":",
"]",
")",
",",
"'a'",
")",
"writer",
"=",
"file_handle",
".",
"write",
"else",
":",
"file_handle",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"output",
")",
",",
"'w'",
")",
"writer",
"=",
"file_handle",
".",
"write",
"elif",
"isinstance",
"(",
"output",
",",
"(",
"path",
",",
"file_target",
")",
")",
":",
"file_handle",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"str",
"(",
"output",
")",
")",
",",
"'w'",
")",
"writer",
"=",
"file_handle",
".",
"write",
"elif",
"isinstance",
"(",
"output",
",",
"(",
"paths",
",",
"sos_targets",
")",
")",
":",
"if",
"len",
"(",
"output",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"f'More than one output is specified {output}'",
")",
"if",
"not",
"isinstance",
"(",
"output",
"[",
"0",
"]",
",",
"(",
"file_target",
",",
"path",
")",
")",
":",
"raise",
"ValueError",
"(",
"f'Action report can only output to file target or standard output'",
")",
"file_handle",
"=",
"open",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"str",
"(",
"output",
"[",
"0",
"]",
")",
")",
",",
"'w'",
")",
"writer",
"=",
"file_handle",
".",
"write",
"elif",
"hasattr",
"(",
"output",
",",
"'write'",
")",
":",
"writer",
"=",
"output",
".",
"write",
"elif",
"output",
"is",
"None",
"or",
"output",
"==",
"''",
":",
"writer",
"=",
"sys",
".",
"stdout",
".",
"write",
"else",
":",
"raise",
"ValueError",
"(",
"f'Invalid output {output}.'",
")",
"with",
"TimeoutInterProcessLock",
"(",
"os",
".",
"path",
".",
"join",
"(",
"env",
".",
"temp_dir",
",",
"'report_lock'",
")",
")",
":",
"if",
"isinstance",
"(",
"script",
",",
"str",
")",
"and",
"script",
".",
"strip",
"(",
")",
":",
"writer",
"(",
"script",
".",
"rstrip",
"(",
")",
"+",
"'\\n\\n'",
")",
"if",
"input",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"input",
",",
"(",
"str",
",",
"file_target",
")",
")",
":",
"if",
"'ACTION'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
"or",
"'ALL'",
"in",
"env",
".",
"config",
"[",
"'SOS_DEBUG'",
"]",
":",
"env",
".",
"log_to_file",
"(",
"'ACTION'",
",",
"f'Loading report from {input}'",
")",
"with",
"open",
"(",
"input",
")",
"as",
"ifile",
":",
"writer",
"(",
"ifile",
".",
"read",
"(",
")",
".",
"rstrip",
"(",
")",
"+",
"'\\n\\n'",
")",
"elif",
"isinstance",
"(",
"input",
",",
"Sequence",
")",
":",
"for",
"ifile",
"in",
"input",
":",
"try",
":",
"env",
".",
"logger",
".",
"debug",
"(",
"f'Loading report from {ifile}'",
")",
"with",
"open",
"(",
"ifile",
")",
"as",
"itmp",
":",
"writer",
"(",
"itmp",
".",
"read",
"(",
")",
".",
"rstrip",
"(",
")",
"+",
"'\\n\\n'",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"ValueError",
"(",
"f'Failed to read input file {ifile}: {e}'",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Unknown input file for action report'",
")",
"if",
"file_handle",
":",
"file_handle",
".",
"close",
"(",
")"
] | python | Write script to an output file specified by `output`, which can be
a filename to which the content of the script will be written,
or any object with a "write" attribute (e.g. a file handle) for which the "write"
function will be called with the content. If output is unspecified, the content
will be written to standard output or appended to a file specified with command
line option `-r`. | false |
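Calling conventions for the action above — a sketch only, since the function leans on the surrounding SoS environment (env.config, the temp-dir lock) and the input files named here are hypothetical:

    import io

    report('analysis done', output='>>summary.md')   # '>>' appends to a file

    buf = io.StringIO()                              # anything with .write works
    report('combined report', input=['part1.txt', 'part2.txt'], output=buf)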
2,659,963 | def csv(ctx, force, threads, mapping, data):
""" Load CSV data into a grano instance using a mapping specification. """
# Find out how many lines there are (for the progress bar).
lines = 0
for line in DictReader(data):
lines += 1
data.seek(0)
# set up objects
mapping = yaml.load(mapping)
mapping_loader = MappingLoader(ctx.obj['grano'], mapping)
def process_row(row):
try:
mapping_loader.load(row)
except GranoException, ge:
msg = '\nServer error: %s' % ge.message
click.secho(msg, fg='red', bold=True)
if not force:
os._exit(1)
except RowException, re:
if not force:
msg = '\nRow %s: %s' % (row['__row_id__'], re.message)
click.secho(msg, fg='red', bold=True)
os._exit(1)
def generate():
with click.progressbar(DictReader(data),
label=data.name,
length=lines) as bar:
for i, row in enumerate(bar):
row['__row_id__'] = i
yield row
threaded(generate(), process_row, num_threads=threads,
max_queue=1) | [
"def",
"csv",
"(",
"ctx",
",",
"force",
",",
"threads",
",",
"mapping",
",",
"data",
")",
":",
"lines",
"=",
"0",
"for",
"line",
"in",
"DictReader",
"(",
"data",
")",
":",
"lines",
"+=",
"1",
"data",
".",
"seek",
"(",
"0",
")",
"mapping",
"=",
"yaml",
".",
"load",
"(",
"mapping",
")",
"mapping_loader",
"=",
"MappingLoader",
"(",
"ctx",
".",
"obj",
"[",
"'grano'",
"]",
",",
"mapping",
")",
"def",
"process_row",
"(",
"row",
")",
":",
"try",
":",
"mapping_loader",
".",
"load",
"(",
"row",
")",
"except",
"GranoException",
",",
"ge",
":",
"msg",
"=",
"'\\nServer error: %s'",
"%",
"ge",
".",
"message",
"click",
".",
"secho",
"(",
"msg",
",",
"fg",
"=",
"'red'",
",",
"bold",
"=",
"True",
")",
"if",
"not",
"force",
":",
"os",
".",
"_exit",
"(",
"1",
")",
"except",
"RowException",
",",
"re",
":",
"if",
"not",
"force",
":",
"msg",
"=",
"'\\nRow %s: %s'",
"%",
"(",
"row",
"[",
"'__row_id__'",
"]",
",",
"re",
".",
"message",
")",
"click",
".",
"secho",
"(",
"msg",
",",
"fg",
"=",
"'red'",
",",
"bold",
"=",
"True",
")",
"os",
".",
"_exit",
"(",
"1",
")",
"def",
"generate",
"(",
")",
":",
"with",
"click",
".",
"progressbar",
"(",
"DictReader",
"(",
"data",
")",
",",
"label",
"=",
"data",
".",
"name",
",",
"length",
"=",
"lines",
")",
"as",
"bar",
":",
"for",
"i",
",",
"row",
"in",
"enumerate",
"(",
"bar",
")",
":",
"row",
"[",
"'__row_id__'",
"]",
"=",
"i",
"yield",
"row",
"threaded",
"(",
"generate",
"(",
")",
",",
"process_row",
",",
"num_threads",
"=",
"threads",
",",
"max_queue",
"=",
"1",
")"
] | python | Load CSV data into a grano instance using a mapping specification. | false |
2,409,848 | def dyno_hist(x, window=None, probability=True, edge_weight=1.):
""" Probability Distribution function from values
Arguments:
probability (bool): whether the values should be min/max scaled to lie on the range [0, 1]
Like `hist` but smoother, more accurate/useful
Double-Normalization:
The x values are min/max normalized to lie in the range 0-1 inclusive
The pdf is normalized to integrate/sum to 1.0
>>> h = dyno_hist(np.arange(100), window=5)
>>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.) < 0.00001
True
>>> h = dyno_hist(np.arange(50), window=12)
>>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.) < 0.00001
True
>>> h = dyno_hist(np.random.randn(1000), window=42)
>>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.0) < 0.04
True
"""
x = np.sort(x)
if probability:
# normalize x first
x = x - x[0]
x = x / float(x[-1] or 1)
window = window or 1
window = min(max(window, 1), int(len(x) / 1.5))
window += 1
# Empirical Densities (PDF) based on diff of sorted values
delta = x[(window - 1):] - x[:(1 - window)]
densities = float(window - 1) / (len(delta) + window - 2) / delta
h = pd.Series(densities, index=x[window // 2:][:len(delta)])
if probability:
if h.index[0] > 0:
h = pd.Series(edge_weight * densities[0], index=[0]).append(h)
if h.index[-1] < 1:
h = h.append(pd.Series(edge_weight * densities[-1], index=[1.]))
return h | [
"def",
"dyno_hist",
"(",
"x",
",",
"window",
"=",
"None",
",",
"probability",
"=",
"True",
",",
"edge_weight",
"=",
"1.",
")",
":",
"x",
"=",
"np",
".",
"sort",
"(",
"x",
")",
"if",
"probability",
":",
"x",
"=",
"x",
"-",
"x",
"[",
"0",
"]",
"x",
"=",
"x",
"/",
"float",
"(",
"x",
"[",
"-",
"1",
"]",
"or",
"1",
")",
"window",
"=",
"window",
"or",
"1",
"window",
"=",
"min",
"(",
"max",
"(",
"window",
",",
"1",
")",
",",
"int",
"(",
"len",
"(",
"x",
")",
"/",
"1.5",
")",
")",
"window",
"+=",
"1",
"delta",
"=",
"x",
"[",
"(",
"window",
"-",
"1",
")",
":",
"]",
"-",
"x",
"[",
":",
"(",
"1",
"-",
"window",
")",
"]",
"densities",
"=",
"float",
"(",
"window",
"-",
"1",
")",
"/",
"(",
"len",
"(",
"delta",
")",
"+",
"window",
"-",
"2",
")",
"/",
"delta",
"h",
"=",
"pd",
".",
"Series",
"(",
"densities",
",",
"index",
"=",
"x",
"[",
"window",
"//",
"2",
":",
"]",
"[",
":",
"len",
"(",
"delta",
")",
"]",
")",
"if",
"probability",
":",
"if",
"h",
".",
"index",
"[",
"0",
"]",
">",
"0",
":",
"h",
"=",
"pd",
".",
"Series",
"(",
"edge_weight",
"*",
"densities",
"[",
"0",
"]",
",",
"index",
"=",
"[",
"0",
"]",
")",
".",
"append",
"(",
"h",
")",
"if",
"h",
".",
"index",
"[",
"-",
"1",
"]",
"<",
"1",
":",
"h",
"=",
"h",
".",
"append",
"(",
"pd",
".",
"Series",
"(",
"edge_weight",
"*",
"densities",
"[",
"-",
"1",
"]",
",",
"index",
"=",
"[",
"1.",
"]",
")",
")",
"return",
"h"
] | python | Probability Distribution function from values
Arguments:
probability (bool): whether the values should be min/max scaled to lie on the range [0, 1]
Like `hist` but smoother, more accurate/useful
Double-Normalization:
The x values are min/max normalized to lie in the range 0-1 inclusive
The pdf is normalized to integrate/sum to 1.0
>>> h = dyno_hist(np.arange(100), window=5)
>>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.) < 0.00001
True
>>> h = dyno_hist(np.arange(50), window=12)
>>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.) < 0.00001
True
>>> h = dyno_hist(np.random.randn(1000), window=42)
>>> abs(sum(np.diff(h.index.values) * h.values[1:]) - 1.0) < 0.04
True | false |
2,644,682 | def __init__(self, actioncollection, parent=None, flags=0):
"""Construct a new dialog for the given action collection
:param actioncollection: the action collection to report
:type actioncollection: :class:`jukeboxcore.action.ActionCollection`
:param parent: Optional - the parent of the window - default is None
:type parent: QWidget
:param flags: the window flags
:type flags: QtCore.Qt.WindowFlags
:raises: None
"""
super(ActionReportDialog, self).__init__(parent, flags)
self.setupUi(self)
self._actioncollection = actioncollection
self._parent = parent
self._flags = flags
status = self._actioncollection.status()
self.status_lb.setText(status.value)
self.message_lb.setText(status.message)
self.traceback_pte.setPlainText(status.traceback)
self.traceback_pte.setVisible(False)
model = create_action_model(self._actioncollection)
self.actions_tablev = WD_TableView(self)
self.actions_tablev.setModel(model)
self.verticalLayout.insertWidget(1, self.actions_tablev)
self.msgdelegate = ActionUnitMessageDelegate(self)
self.tbdelegate = ActionUnitTracebackDelegate(self)
self.actions_tablev.setItemDelegateForColumn(3, self.msgdelegate)
self.actions_tablev.setItemDelegateForColumn(4, self.tbdelegate)
self.actions_tablev.horizontalHeader().setStretchLastSection(True) | [
"def",
"__init__",
"(",
"self",
",",
"actioncollection",
",",
"parent",
"=",
"None",
",",
"flags",
"=",
"0",
")",
":",
"super",
"(",
"ActionReportDialog",
",",
"self",
")",
".",
"__init__",
"(",
"parent",
",",
"flags",
")",
"self",
".",
"setupUi",
"(",
"self",
")",
"self",
".",
"_actioncollection",
"=",
"actioncollection",
"self",
".",
"_parent",
"=",
"parent",
"self",
".",
"_flags",
"=",
"flags",
"status",
"=",
"self",
".",
"_actioncollection",
".",
"status",
"(",
")",
"self",
".",
"status_lb",
".",
"setText",
"(",
"status",
".",
"value",
")",
"self",
".",
"message_lb",
".",
"setText",
"(",
"status",
".",
"message",
")",
"self",
".",
"traceback_pte",
".",
"setPlainText",
"(",
"status",
".",
"traceback",
")",
"self",
".",
"traceback_pte",
".",
"setVisible",
"(",
"False",
")",
"model",
"=",
"create_action_model",
"(",
"self",
".",
"_actioncollection",
")",
"self",
".",
"actions_tablev",
"=",
"WD_TableView",
"(",
"self",
")",
"self",
".",
"actions_tablev",
".",
"setModel",
"(",
"model",
")",
"self",
".",
"verticalLayout",
".",
"insertWidget",
"(",
"1",
",",
"self",
".",
"actions_tablev",
")",
"self",
".",
"msgdelegate",
"=",
"ActionUnitMessageDelegate",
"(",
"self",
")",
"self",
".",
"tbdelegate",
"=",
"ActionUnitTracebackDelegate",
"(",
"self",
")",
"self",
".",
"actions_tablev",
".",
"setItemDelegateForColumn",
"(",
"3",
",",
"self",
".",
"msgdelegate",
")",
"self",
".",
"actions_tablev",
".",
"setItemDelegateForColumn",
"(",
"4",
",",
"self",
".",
"tbdelegate",
")",
"self",
".",
"actions_tablev",
".",
"horizontalHeader",
"(",
")",
".",
"setStretchLastSection",
"(",
"True",
")"
] | python | Construct a new dialog for the given action collection
:param actioncollection: the action collection to report
:type actioncollection: :class:`jukeboxcore.action.ActionCollection`
:param parent: Optional - the parent of the window - default is None
:type parent: QWidget
:param flags: the window flags
:type flags: QtCore.Qt.WindowFlags
:raises: None | false |
2,213,937 | def _help_reindent(self, help, indent=None):
"""Hook to re-indent help strings before writing to stdout.
"help" is the help content to re-indent
"indent" is a string with which to indent each line of the
help content after normalizing. If unspecified or None
then the default is use: the 'self.helpindent' class
attribute. By default this is the empty string, i.e.
no indentation.
By default, all common leading whitespace is removed and then
the lot is indented by 'self.helpindent'. When calculating the
common leading whitespace the first line is ignored -- hence
help content for Conan can be written as follows and have the
expected indentation:
def do_crush(self, ...):
'''${cmd_name}: crush your enemies, see them driven before you...
c.f. Conan the Barbarian'''
"""
if indent is None:
indent = self.helpindent
lines = help.splitlines(0)
_dedentlines(lines, skip_first_line=True)
lines = [(indent + line).rstrip() for line in lines]
return '\n'.join(lines) | [
"def",
"_help_reindent",
"(",
"self",
",",
"help",
",",
"indent",
"=",
"None",
")",
":",
"if",
"indent",
"is",
"None",
":",
"indent",
"=",
"self",
".",
"helpindent",
"lines",
"=",
"help",
".",
"splitlines",
"(",
"0",
")",
"_dedentlines",
"(",
"lines",
",",
"skip_first_line",
"=",
"True",
")",
"lines",
"=",
"[",
"(",
"indent",
"+",
"line",
")",
".",
"rstrip",
"(",
")",
"for",
"line",
"in",
"lines",
"]",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] | python | Hook to re-indent help strings before writing to stdout.
"help" is the help content to re-indent
"indent" is a string with which to indent each line of the
help content after normalizing. If unspecified or None
then the default is use: the 'self.helpindent' class
attribute. By default this is the empty string, i.e.
no indentation.
By default, all common leading whitespace is removed and then
the lot is indented by 'self.helpindent'. When calculating the
common leading whitespace the first line is ignored -- hence
help content for Conan can be written as follows and have the
expected indentation:
def do_crush(self, ...):
'''${cmd_name}: crush your enemies, see them driven before you...
c.f. Conan the Barbarian''' | false |
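The same normalize-then-indent idea with the stdlib, treating the first line specially as the snippet does; textwrap.dedent stands in for the private _dedentlines helper.

    import textwrap

    def reindent(help_text, indent='    '):
        first, _, rest = help_text.partition('\n')
        body = textwrap.dedent(rest)       # strip common leading whitespace
        lines = [first] + body.splitlines()
        return '\n'.join((indent + line).rstrip() for line in lines)

    doc = """crush: crush your enemies, see them driven before you...
            c.f. Conan the Barbarian"""
    print(reindent(doc))
    # both lines end up indented by exactly four spaces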
2,623,703 | def logTwisted():
"""
Integrate twisted's logger with our logger.
This is done in a separate method because calling this imports and sets
up a reactor. Since we want basic logging working before choosing a
reactor, we need to separate these.
"""
global _initializedTwisted
if _initializedTwisted:
return
debug('log', 'Integrating twisted logger')
# integrate twisted's logging with us
from twisted.python import log as tlog
# this call imports the reactor
# that is why we do this in a separate method
from twisted.spread import pb
# we don't want logs for pb.Error types since they
# are specifically raised to be handled on the other side
observer = _getTheTwistedLogObserver()
observer.ignoreErrors([pb.Error, ])
tlog.startLoggingWithObserver(observer.emit, False)
_initializedTwisted = True | [
"def",
"logTwisted",
"(",
")",
":",
"global",
"_initializedTwisted",
"if",
"_initializedTwisted",
":",
"return",
"debug",
"(",
"'log'",
",",
"'Integrating twisted logger'",
")",
"from",
"twisted",
".",
"python",
"import",
"log",
"as",
"tlog",
"from",
"twisted",
".",
"spread",
"import",
"pb",
"observer",
"=",
"_getTheTwistedLogObserver",
"(",
")",
"observer",
".",
"ignoreErrors",
"(",
"[",
"pb",
".",
"Error",
",",
"]",
")",
"tlog",
".",
"startLoggingWithObserver",
"(",
"observer",
".",
"emit",
",",
"False",
")",
"_initializedTwisted",
"=",
"True"
] | python | Integrate twisted's logger with our logger.
This is done in a separate method because calling this imports and sets
up a reactor. Since we want basic logging working before choosing a
reactor, we need to separate these. | false |
2,284,557 | def __init__(self, width, height,
chip_resources={Cores: 18, SDRAM: 128*1024*1024,
SRAM: 32*1024},
chip_resource_exceptions={}, dead_chips=set(),
dead_links=set()):
"""Defines the resources available within a SpiNNaker system.
Parameters
----------
width : int
height : int
chip_resources : {resource_key: requirement, ...}
chip_resource_exceptions : {(x,y): resources, ...}
dead_chips : set([(x,y), ...])
dead_links : set([(x,y,link), ...])
"""
self.width = width
self.height = height
self.chip_resources = chip_resources.copy()
self.chip_resource_exceptions = chip_resource_exceptions.copy()
self.dead_chips = dead_chips.copy()
self.dead_links = dead_links.copy() | [
"def",
"__init__",
"(",
"self",
",",
"width",
",",
"height",
",",
"chip_resources",
"=",
"{",
"Cores",
":",
"18",
",",
"SDRAM",
":",
"128",
"*",
"1024",
"*",
"1024",
",",
"SRAM",
":",
"32",
"*",
"1024",
"}",
",",
"chip_resource_exceptions",
"=",
"{",
"}",
",",
"dead_chips",
"=",
"set",
"(",
")",
",",
"dead_links",
"=",
"set",
"(",
")",
")",
":",
"self",
".",
"width",
"=",
"width",
"self",
".",
"height",
"=",
"height",
"self",
".",
"chip_resources",
"=",
"chip_resources",
".",
"copy",
"(",
")",
"self",
".",
"chip_resource_exceptions",
"=",
"chip_resource_exceptions",
".",
"copy",
"(",
")",
"self",
".",
"dead_chips",
"=",
"dead_chips",
".",
"copy",
"(",
")",
"self",
".",
"dead_links",
"=",
"dead_links",
".",
"copy",
"(",
")"
] | python | Defines the resources available within a SpiNNaker system.
Parameters
----------
width : int
height : int
chip_resources : {resource_key: requirement, ...}
chip_resource_exceptions : {(x,y): resources, ...}
dead_chips : set([(x,y), ...])
dead_links : set([(x,y,link), ...]) | false |
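Instantiation sketch. The class name Machine is an assumption (this record only shows __init__), while Cores/SDRAM/SRAM are the resource keys from the defaults above:

    machine = Machine(
        width=8, height=8,
        chip_resource_exceptions={
            (0, 1): {Cores: 17,                 # one core reserved here
                     SDRAM: 128 * 1024 * 1024,
                     SRAM: 32 * 1024},
        },
        dead_chips={(3, 3)},                    # whole chip unavailable
        dead_links={(0, 0, 2)},                 # single dead link
    )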
2,588,385 | def deconstruct(self):
"""
Denormalize is always false migrations
"""
name, path, args, kwargs = super(AssetsFileField, self).deconstruct()
kwargs['denormalize'] = False
return name, path, args, kwargs | [
"def",
"deconstruct",
"(",
"self",
")",
":",
"name",
",",
"path",
",",
"args",
",",
"kwargs",
"=",
"super",
"(",
"AssetsFileField",
",",
"self",
")",
".",
"deconstruct",
"(",
")",
"kwargs",
"[",
"'denormalize'",
"]",
"=",
"False",
"return",
"name",
",",
"path",
",",
"args",
",",
"kwargs"
] | python | Denormalize is always false migrations | false |
2,207,843 | def adapters(self):
"""
:class:`~zhmcclient.AdapterManager`: Access to the
:term:`Adapters <Adapter>` in this CPC.
"""
# We do here some lazy loading.
if not self._adapters:
self._adapters = AdapterManager(self)
return self._adapters | [
"def",
"adapters",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_adapters",
":",
"self",
".",
"_adapters",
"=",
"AdapterManager",
"(",
"self",
")",
"return",
"self",
".",
"_adapters"
] | python | :class:`~zhmcclient.AdapterManager`: Access to the
:term:`Adapters <Adapter>` in this CPC. | false |
2,216,642 | def oauth_session(request, state=None, token=None):
""" Constructs the OAuth2 session object. """
if settings.DISCORD_REDIRECT_URI is not None:
redirect_uri = settings.DISCORD_REDIRECT_URI
else:
redirect_uri = request.build_absolute_uri(
reverse('discord_bind_callback'))
scope = (['email', 'guilds.join'] if settings.DISCORD_EMAIL_SCOPE
else ['identity', 'guilds.join'])
return OAuth2Session(settings.DISCORD_CLIENT_ID,
redirect_uri=redirect_uri,
scope=scope,
token=token,
state=state) | [
"def",
"oauth_session",
"(",
"request",
",",
"state",
"=",
"None",
",",
"token",
"=",
"None",
")",
":",
"if",
"settings",
".",
"DISCORD_REDIRECT_URI",
"is",
"not",
"None",
":",
"redirect_uri",
"=",
"settings",
".",
"DISCORD_REDIRECT_URI",
"else",
":",
"redirect_uri",
"=",
"request",
".",
"build_absolute_uri",
"(",
"reverse",
"(",
"'discord_bind_callback'",
")",
")",
"scope",
"=",
"(",
"[",
"'email'",
",",
"'guilds.join'",
"]",
"if",
"settings",
".",
"DISCORD_EMAIL_SCOPE",
"else",
"[",
"'identity'",
",",
"'guilds.join'",
"]",
")",
"return",
"OAuth2Session",
"(",
"settings",
".",
"DISCORD_CLIENT_ID",
",",
"redirect_uri",
"=",
"redirect_uri",
",",
"scope",
"=",
"scope",
",",
"token",
"=",
"token",
",",
"state",
"=",
"state",
")"
] | python | Constructs the OAuth2 session object. | false |
2,500,719 | def set_framecolor(self, color):
"""set color for outer frame"""
self.framecolor = color
self.canvas.figure.set_facecolor(color)
if callable(self.theme_color_callback):
self.theme_color_callback(color, 'frame') | [
"def",
"set_framecolor",
"(",
"self",
",",
"color",
")",
":",
"self",
".",
"framecolor",
"=",
"color",
"self",
".",
"canvas",
".",
"figure",
".",
"set_facecolor",
"(",
"color",
")",
"if",
"callable",
"(",
"self",
".",
"theme_color_callback",
")",
":",
"self",
".",
"theme_color_callback",
"(",
"color",
",",
"'frame'",
")"
] | python | set color for outer frame | false |
2,095,169 | def points_random_3d(count, range_x=(-10.0, 10.0), range_y=(-10.0, 10.0), range_z=(-10.0, 10.0), seed=None) -> VAO:
"""
Generates random positions inside a confined box.
Args:
count (int): Number of points to generate
Keyword Args:
range_x (tuple): min-max range for x axis: Example (-10.0, 10.0)
range_y (tuple): min-max range for y axis: Example (-10.0, 10.0)
range_z (tuple): min-max range for z axis: Example (-10.0, 10.0)
seed (int): The random seed
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance
"""
random.seed(seed)
def gen():
for _ in range(count):
yield random.uniform(*range_x)
yield random.uniform(*range_y)
yield random.uniform(*range_z)
data = numpy.fromiter(gen(), count=count * 3, dtype=numpy.float32)
vao = VAO("geometry:points_random_3d", mode=moderngl.POINTS)
vao.buffer(data, '3f', ['in_position'])
return vao | [
"def",
"points_random_3d",
"(",
"count",
",",
"range_x",
"=",
"(",
"-",
"10.0",
",",
"10.0",
")",
",",
"range_y",
"=",
"(",
"-",
"10.0",
",",
"10.0",
")",
",",
"range_z",
"=",
"(",
"-",
"10.0",
",",
"10.0",
")",
",",
"seed",
"=",
"None",
")",
"->",
"VAO",
":",
"random",
".",
"seed",
"(",
"seed",
")",
"def",
"gen",
"(",
")",
":",
"for",
"_",
"in",
"range",
"(",
"count",
")",
":",
"yield",
"random",
".",
"uniform",
"(",
"*",
"range_x",
")",
"yield",
"random",
".",
"uniform",
"(",
"*",
"range_y",
")",
"yield",
"random",
".",
"uniform",
"(",
"*",
"range_z",
")",
"data",
"=",
"numpy",
".",
"fromiter",
"(",
"gen",
"(",
")",
",",
"count",
"=",
"count",
"*",
"3",
",",
"dtype",
"=",
"numpy",
".",
"float32",
")",
"vao",
"=",
"VAO",
"(",
"\"geometry:points_random_3d\"",
",",
"mode",
"=",
"moderngl",
".",
"POINTS",
")",
"vao",
".",
"buffer",
"(",
"data",
",",
"'3f'",
",",
"[",
"'in_position'",
"]",
")",
"return",
"vao"
] | python | Generates random positions inside a confined box.
Args:
count (int): Number of points to generate
Keyword Args:
range_x (tuple): min-max range for x axis: Example (-10.0, 10.0)
range_y (tuple): min-max range for y axis: Example (-10.0, 10.0)
range_z (tuple): min-max range for z axis: Example (-10.0, 10.0)
seed (int): The random seed
Returns:
A :py:class:`demosys.opengl.vao.VAO` instance | false |
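Usage is a single call; note that numpy.fromiter with an explicit count pre-allocates the float32 buffer instead of growing a Python list. A short sketch (the render call assumes a moderngl shader program from the surrounding framework):

vao = points_random_3d(1000,
                       range_x=(-5.0, 5.0),
                       range_y=(-5.0, 5.0),
                       range_z=(-5.0, 5.0),
                       seed=42)  # fixed seed gives a reproducible cloud
# later, inside a frame callback:
# vao.render(program)  # program: an assumed shader program object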
2,096,718 | def benchmark_process_and_backend(exe, backend):
"""Returns BenchmarkResults for a given executable and backend."""
env = dict(os.environ)
env['GOLESS_BACKEND'] = backend
args = [exe, '-m', 'benchmark']
return get_benchproc_results(args, env=env) | [
"def",
"benchmark_process_and_backend",
"(",
"exe",
",",
"backend",
")",
":",
"env",
"=",
"dict",
"(",
"os",
".",
"environ",
")",
"env",
"[",
"'GOLESS_BACKEND'",
"]",
"=",
"backend",
"args",
"=",
"[",
"exe",
",",
"'-m'",
",",
"'benchmark'",
"]",
"return",
"get_benchproc_results",
"(",
"args",
",",
"env",
"=",
"env",
")"
] | python | Returns BenchmarkResults for a given executable and backend. | false |
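The helper copies os.environ and overrides a single variable, so the change is scoped to the child process only. A sketch with a hypothetical get_benchproc_results built on the standard library; the executable path and the 'gevent' backend name are illustrative.

import subprocess

def get_benchproc_results(args, env=None):  # hypothetical stand-in
    # Run the benchmark child process and capture its stdout.
    return subprocess.check_output(args, env=env).decode()

# The parent's environment is untouched; only the child sees GOLESS_BACKEND.
output = benchmark_process_and_backend('/usr/bin/python3', 'gevent')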
2,349,626 | def emit_only(self, event: str, func_names: Union[str, List[str]], *args,
**kwargs) -> None:
""" Specifically only emits certain subscribed events.
:param event: Name of the event.
:type event: str
:param func_names: Function(s) to emit.
:type func_names: Union[ str | List[str] ]
"""
if isinstance(func_names, str):
func_names = [func_names]
for func in self._event_funcs(event):
if func.__name__ in func_names:
func(*args, **kwargs) | [
"def",
"emit_only",
"(",
"self",
",",
"event",
":",
"str",
",",
"func_names",
":",
"Union",
"[",
"str",
",",
"List",
"[",
"str",
"]",
"]",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"->",
"None",
":",
"if",
"isinstance",
"(",
"func_names",
",",
"str",
")",
":",
"func_names",
"=",
"[",
"func_names",
"]",
"for",
"func",
"in",
"self",
".",
"_event_funcs",
"(",
"event",
")",
":",
"if",
"func",
".",
"__name__",
"in",
"func_names",
":",
"func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")"
] | python | Specifically only emits certain subscribed events.
:param event: Name of the event.
:type event: str
:param func_names: Function(s) to emit.
:type func_names: Union[ str | List[str] ] | false |
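A self-contained sketch of emit_only in action; the EventBus class below is a minimal illustrative bus (the real class is not shown in this record), reusing the method body above verbatim.

from collections import defaultdict
from typing import List, Union

class EventBus:
    def __init__(self):
        self._subs = defaultdict(list)

    def on(self, event: str):
        def deco(func):
            self._subs[event].append(func)
            return func
        return deco

    def _event_funcs(self, event: str):
        return self._subs[event]

    def emit_only(self, event: str, func_names: Union[str, List[str]],
                  *args, **kwargs) -> None:
        if isinstance(func_names, str):
            func_names = [func_names]
        for func in self._event_funcs(event):
            if func.__name__ in func_names:
                func(*args, **kwargs)

bus = EventBus()

@bus.on('user_created')
def send_welcome_email(user):
    print('emailing', user)

@bus.on('user_created')
def update_metrics(user):
    print('counting', user)

bus.emit_only('user_created', 'send_welcome_email', 'alice')  # emails only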
2,264,493 | def close_others(self):
"""
Closes every editors tabs except the current one.
"""
current_widget = self.currentWidget()
self._try_close_dirty_tabs(exept=current_widget)
i = 0
while self.count() > 1:
widget = self.widget(i)
if widget != current_widget:
self.removeTab(i)
else:
i = 1 | [
"def",
"close_others",
"(",
"self",
")",
":",
"current_widget",
"=",
"self",
".",
"currentWidget",
"(",
")",
"self",
".",
"_try_close_dirty_tabs",
"(",
"exept",
"=",
"current_widget",
")",
"i",
"=",
"0",
"while",
"self",
".",
"count",
"(",
")",
">",
"1",
":",
"widget",
"=",
"self",
".",
"widget",
"(",
"i",
")",
"if",
"widget",
"!=",
"current_widget",
":",
"self",
".",
"removeTab",
"(",
"i",
")",
"else",
":",
"i",
"=",
"1"
] | python | Closes every editors tabs except the current one. | false |
1,744,816 | def _register_bounds_validator_if_needed(parser, name, flag_values):
"""Enforces lower and upper bounds for numeric flags.
Args:
parser: NumericParser (either FloatParser or IntegerParser), provides lower
and upper bounds, and help text to display.
name: str, name of the flag
flag_values: FlagValues.
"""
if parser.lower_bound is not None or parser.upper_bound is not None:
def checker(value):
if value is not None and parser.is_outside_bounds(value):
message = '%s is not %s' % (value, parser.syntactic_help)
raise _exceptions.ValidationError(message)
return True
_validators.register_validator(name, checker, flag_values=flag_values) | [
"def",
"_register_bounds_validator_if_needed",
"(",
"parser",
",",
"name",
",",
"flag_values",
")",
":",
"if",
"parser",
".",
"lower_bound",
"is",
"not",
"None",
"or",
"parser",
".",
"upper_bound",
"is",
"not",
"None",
":",
"def",
"checker",
"(",
"value",
")",
":",
"if",
"value",
"is",
"not",
"None",
"and",
"parser",
".",
"is_outside_bounds",
"(",
"value",
")",
":",
"message",
"=",
"'%s is not %s'",
"%",
"(",
"value",
",",
"parser",
".",
"syntactic_help",
")",
"raise",
"_exceptions",
".",
"ValidationError",
"(",
"message",
")",
"return",
"True",
"_validators",
".",
"register_validator",
"(",
"name",
",",
"checker",
",",
"flag_values",
"=",
"flag_values",
")"
] | python | Enforces lower and upper bounds for numeric flags.
Args:
parser: NumericParser (either FloatParser or IntegerParser), provides lower
and upper bounds, and help text to display.
name: str, name of the flag
flag_values: FlagValues. | false |
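From the user's side, this validator is what makes bounded flag definitions fail fast at parse time. A sketch against the public absl.flags API; the flag name and bound are illustrative.

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_integer('workers', 4, 'Worker count.', lower_bound=1)

def main(argv):
    del argv  # unused
    print(FLAGS.workers)

if __name__ == '__main__':
    # e.g. --workers=0 aborts with roughly "0 is not a positive integer".
    app.run(main)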
1,862,838 | def __init__(self, payload=None, deployjar=None, **kwargs):
"""
:param boolean deployjar: If True, pack all 3rdparty and internal jar classfiles into
a single deployjar in the bundle's root dir. If unset, all jars will go into the
bundle's libs directory, and the root will only contain a synthetic jar with its manifest's
Class-Path set to those jars.
"""
payload = payload or Payload()
payload.add_field('deployjar', PrimitiveField(deployjar))
super(JvmApp, self).__init__(payload=payload, **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"payload",
"=",
"None",
",",
"deployjar",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"payload",
"=",
"payload",
"or",
"Payload",
"(",
")",
"payload",
".",
"add_field",
"(",
"'deployjar'",
",",
"PrimitiveField",
"(",
"deployjar",
")",
")",
"super",
"(",
"JvmApp",
",",
"self",
")",
".",
"__init__",
"(",
"payload",
"=",
"payload",
",",
"**",
"kwargs",
")"
] | python | :param boolean deployjar: If True, pack all 3rdparty and internal jar classfiles into
a single deployjar in the bundle's root dir. If unset, all jars will go into the
bundle's libs directory, and the root will only contain a synthetic jar with its manifest's
Class-Path set to those jars. | false |
2,099,997 | def get_pt(self, viewer, points, pt, canvas_radius=None):
"""Takes an array of points `points` and a target point `pt`.
Returns the indices of the points that are within the
radius of the target point. If none of the points are
within the radius, returns an empty array.
"""
if canvas_radius is None:
canvas_radius = self.cap_radius
if hasattr(self, 'rot_deg'):
# rotate point back to cartesian alignment for test
ctr_pt = self.get_center_pt()
pt = trcalc.rotate_coord(pt, [-self.rot_deg], ctr_pt)
res = self.within_radius(viewer, points, pt, canvas_radius)
return np.flatnonzero(res) | [
"def",
"get_pt",
"(",
"self",
",",
"viewer",
",",
"points",
",",
"pt",
",",
"canvas_radius",
"=",
"None",
")",
":",
"if",
"canvas_radius",
"is",
"None",
":",
"canvas_radius",
"=",
"self",
".",
"cap_radius",
"if",
"hasattr",
"(",
"self",
",",
"'rot_deg'",
")",
":",
"ctr_pt",
"=",
"self",
".",
"get_center_pt",
"(",
")",
"pt",
"=",
"trcalc",
".",
"rotate_coord",
"(",
"pt",
",",
"[",
"-",
"self",
".",
"rot_deg",
"]",
",",
"ctr_pt",
")",
"res",
"=",
"self",
".",
"within_radius",
"(",
"viewer",
",",
"points",
",",
"pt",
",",
"canvas_radius",
")",
"return",
"np",
".",
"flatnonzero",
"(",
"res",
")"
] | python | Takes an array of points `points` and a target point `pt`.
Returns the indices of the points that are within the
radius of the target point. If none of the points are
within the radius, returns an empty array. | false |
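The radius test itself is delegated to within_radius() on the shape; the index-extraction step is plain numpy. A standalone sketch with a Euclidean metric for illustration:

import numpy as np

def points_within_radius(points, pt, radius):
    d2 = np.sum((np.asarray(points, float) - np.asarray(pt, float)) ** 2,
                axis=1)
    return np.flatnonzero(d2 <= radius ** 2)  # indices of hits, maybe empty

print(points_within_radius([(0, 0), (3, 4), (10, 10)], (0, 0), 5.0))  # [0 1]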
2,087,075 | def set_entry(self, filename, obj):
"""
Set the entry.
"""
self.entries[filename] = obj
self.dirty = True | [
"def",
"set_entry",
"(",
"self",
",",
"filename",
",",
"obj",
")",
":",
"self",
".",
"entries",
"[",
"filename",
"]",
"=",
"obj",
"self",
".",
"dirty",
"=",
"True"
] | python | Set the entry. | false |
1,930,114 | def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
return key_value_contents(use_dict=use_dict, as_class=as_class,
key_values=(
('function', 'DeleteForeignDeviceTableEntry'),
('address', str(self.bvlciAddress)),
)) | [
"def",
"bvlpdu_contents",
"(",
"self",
",",
"use_dict",
"=",
"None",
",",
"as_class",
"=",
"dict",
")",
":",
"return",
"key_value_contents",
"(",
"use_dict",
"=",
"use_dict",
",",
"as_class",
"=",
"as_class",
",",
"key_values",
"=",
"(",
"(",
"'function'",
",",
"'DeleteForeignDeviceTableEntry'",
")",
",",
"(",
"'address'",
",",
"str",
"(",
"self",
".",
"bvlciAddress",
")",
")",
",",
")",
")"
] | python | Return the contents of an object as a dict. | false |
1,823,061 | def _safe_get_element_date(self, path, root=None):
"""Safe get elemnent date.
Get element as datetime.date or None,
:param root:
Lxml element.
:param path:
String path (i.e. 'Items.Item.Offers.Offer').
:return:
datetime.date or None.
"""
value = self._safe_get_element_text(path=path, root=root)
if value is not None:
try:
value = dateutil.parser.parse(value)
if value:
value = value.date()
except ValueError:
value = None
return value | [
"def",
"_safe_get_element_date",
"(",
"self",
",",
"path",
",",
"root",
"=",
"None",
")",
":",
"value",
"=",
"self",
".",
"_safe_get_element_text",
"(",
"path",
"=",
"path",
",",
"root",
"=",
"root",
")",
"if",
"value",
"is",
"not",
"None",
":",
"try",
":",
"value",
"=",
"dateutil",
".",
"parser",
".",
"parse",
"(",
"value",
")",
"if",
"value",
":",
"value",
"=",
"value",
".",
"date",
"(",
")",
"except",
"ValueError",
":",
"value",
"=",
"None",
"return",
"value"
] | python | Safe get element date.
Get element as datetime.date or None.
:param root:
Lxml element.
:param path:
String path (i.e. 'Items.Item.Offers.Offer').
:return:
datetime.date or None. | false |
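The parse-or-None idiom at the heart of this helper, shown standalone with python-dateutil:

import dateutil.parser

def parse_date_or_none(value):
    if value is None:
        return None
    try:
        return dateutil.parser.parse(value).date()
    except ValueError:
        return None

print(parse_date_or_none('2013-05-07'))   # 2013-05-07
print(parse_date_or_none('not a date'))   # None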
2,708,783 | def info(self, *args) -> "Err":
"""
Creates an info message
"""
error = self._create_err("info", *args)
print(self._errmsg(error))
return error | [
"def",
"info",
"(",
"self",
",",
"*",
"args",
")",
"->",
"\"Err\"",
":",
"error",
"=",
"self",
".",
"_create_err",
"(",
"\"info\"",
",",
"*",
"args",
")",
"print",
"(",
"self",
".",
"_errmsg",
"(",
"error",
")",
")",
"return",
"error"
] | python | Creates an info message | false |
1,711,976 | def _find_inline_images(contentsinfo):
"Find inline images in the contentstream"
for n, inline in enumerate(contentsinfo.inline_images):
yield ImageInfo(
name='inline-%02d' % n, shorthand=inline.shorthand, inline=inline
) | [
"def",
"_find_inline_images",
"(",
"contentsinfo",
")",
":",
"for",
"n",
",",
"inline",
"in",
"enumerate",
"(",
"contentsinfo",
".",
"inline_images",
")",
":",
"yield",
"ImageInfo",
"(",
"name",
"=",
"'inline-%02d'",
"%",
"n",
",",
"shorthand",
"=",
"inline",
".",
"shorthand",
",",
"inline",
"=",
"inline",
")"
] | python | Find inline images in the contentstream | false |
1,742,246 | def define_snowflake_config():
'''Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html
'''
account = Field(
String,
description='Your Snowflake account name. For more details, see https://bit.ly/2FBL320.',
is_optional=True,
)
user = Field(String, description='User login name.', is_optional=False)
password = Field(String, description='User password.', is_optional=False)
database = Field(
String,
description='''Name of the default database to use. After login, you can use USE DATABASE
to change the database.''',
is_optional=True,
)
schema = Field(
String,
description='''Name of the default schema to use. After login, you can use USE SCHEMA to
change the schema.''',
is_optional=True,
)
role = Field(
String,
description='''Name of the default role to use. After login, you can use USE ROLE to change
the role.''',
is_optional=True,
)
warehouse = Field(
String,
description='''Name of the default warehouse to use. After login, you can use USE WAREHOUSE
to change the warehouse.''',
is_optional=True,
)
autocommit = Field(
Bool,
description='''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True
or False to enable or disable autocommit mode in the session, respectively.''',
is_optional=True,
)
client_prefetch_threads = Field(
Int,
description='''Number of threads used to download the results sets (4 by default).
Increasing the value improves fetch performance but requires more memory.''',
is_optional=True,
)
client_session_keep_alive = Field(
String,
description='''False by default. Set this to True to keep the session active indefinitely,
even if there is no activity from the user. Make certain to call the close method to
terminate the thread properly or the process may hang.''',
is_optional=True,
)
login_timeout = Field(
Int,
description='''Timeout in seconds for login. By default, 60 seconds. The login request gives
up after the timeout length if the HTTP response is "success".''',
is_optional=True,
)
network_timeout = Field(
Int,
description='''Timeout in seconds for all other operations. By default, none/infinite. A
general request gives up after the timeout length if the HTTP response is not "success"''',
is_optional=True,
)
ocsp_response_cache_filename = Field(
Path,
description='''URI for the OCSP response cache file.
By default, the OCSP response cache file is created in the cache directory.''',
is_optional=True,
)
validate_default_parameters = Field(
Bool,
description='''False by default. Raise an exception if either one of specified database,
schema or warehouse doesn't exist if True.''',
is_optional=True,
)
paramstyle = Field(
# TODO should validate only against permissible values for this
String,
description='''pyformat by default for client side binding. Specify qmark or numeric to
change bind variable formats for server side binding.''',
is_optional=True,
)
timezone = Field(
String,
description='''None by default, which honors the Snowflake parameter TIMEZONE. Set to a
valid time zone (e.g. America/Los_Angeles) to set the session time zone.''',
is_optional=True,
)
return Field(
Dict(
fields={
'account': account,
'user': user,
'password': password,
'database': database,
'schema': schema,
'role': role,
'warehouse': warehouse,
'autocommit': autocommit,
'client_prefetch_threads': client_prefetch_threads,
'client_session_keep_alive': client_session_keep_alive,
'login_timeout': login_timeout,
'network_timeout': network_timeout,
'ocsp_response_cache_filename': ocsp_response_cache_filename,
'validate_default_parameters': validate_default_parameters,
'paramstyle': paramstyle,
'timezone': timezone,
}
),
description='Snowflake configuration',
) | [
"def",
"define_snowflake_config",
"(",
")",
":",
"account",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'Your Snowflake account name. For more details, see https://bit.ly/2FBL320.'",
",",
"is_optional",
"=",
"True",
",",
")",
"user",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'User login name.'",
",",
"is_optional",
"=",
"False",
")",
"password",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'User password.'",
",",
"is_optional",
"=",
"False",
")",
"database",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default database to use. After login, you can use USE DATABASE\n to change the database.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"schema",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default schema to use. After login, you can use USE SCHEMA to \n change the schema.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"role",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default role to use. After login, you can use USE ROLE to change\n the role.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"warehouse",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''Name of the default warehouse to use. After login, you can use USE WAREHOUSE\n to change the role.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"autocommit",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''None by default, which honors the Snowflake parameter AUTOCOMMIT. Set to True\n or False to enable or disable autocommit mode in the session, respectively.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"client_prefetch_threads",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Number of threads used to download the results sets (4 by default).\n Increasing the value improves fetch performance but requires more memory.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"client_session_keep_alive",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''False by default. Set this to True to keep the session active indefinitely,\n even if there is no activity from the user. Make certain to call the close method to\n terminate the thread properly or the process may hang.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"login_timeout",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Timeout in seconds for login. By default, 60 seconds. The login request gives\n up after the timeout length if the HTTP response is \"success\".'''",
",",
"is_optional",
"=",
"True",
",",
")",
"network_timeout",
"=",
"Field",
"(",
"Int",
",",
"description",
"=",
"'''Timeout in seconds for all other operations. By default, none/infinite. A\n general request gives up after the timeout length if the HTTP response is not \"success\"'''",
",",
"is_optional",
"=",
"True",
",",
")",
"ocsp_response_cache_filename",
"=",
"Field",
"(",
"Path",
",",
"description",
"=",
"'''URI for the OCSP response cache file.\n By default, the OCSP response cache file is created in the cache directory.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"validate_default_parameters",
"=",
"Field",
"(",
"Bool",
",",
"description",
"=",
"'''False by default. Raise an exception if either one of specified database,\n schema or warehouse doesn't exists if True.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"paramstyle",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''pyformat by default for client side binding. Specify qmark or numeric to\n change bind variable formats for server side binding.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"timezone",
"=",
"Field",
"(",
"String",
",",
"description",
"=",
"'''None by default, which honors the Snowflake parameter TIMEZONE. Set to a\n valid time zone (e.g. America/Los_Angeles) to set the session time zone.'''",
",",
"is_optional",
"=",
"True",
",",
")",
"return",
"Field",
"(",
"Dict",
"(",
"fields",
"=",
"{",
"'account'",
":",
"account",
",",
"'user'",
":",
"user",
",",
"'password'",
":",
"password",
",",
"'database'",
":",
"database",
",",
"'schema'",
":",
"schema",
",",
"'role'",
":",
"role",
",",
"'warehouse'",
":",
"warehouse",
",",
"'autocommit'",
":",
"autocommit",
",",
"'client_prefetch_threads'",
":",
"client_prefetch_threads",
",",
"'client_session_keep_alive'",
":",
"client_session_keep_alive",
",",
"'login_timeout'",
":",
"login_timeout",
",",
"'network_timeout'",
":",
"network_timeout",
",",
"'ocsp_response_cache_filename'",
":",
"ocsp_response_cache_filename",
",",
"'validate_default_parameters'",
":",
"validate_default_parameters",
",",
"'paramstyle'",
":",
"paramstyle",
",",
"'timezone'",
":",
"timezone",
",",
"}",
")",
",",
"description",
"=",
"'Snowflake configuration'",
",",
")"
] | python | Snowflake configuration.
See the Snowflake documentation for reference:
https://docs.snowflake.net/manuals/user-guide/python-connector-api.html | false |
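Only user and password are declared with is_optional=False, so a minimal config needs just those two and everything else falls back to connector defaults. A hedged example of a dict satisfying the schema (all values are placeholders):

snowflake_config = {
    'account': 'xy12345.us-east-1',  # placeholder account locator
    'user': 'etl_user',
    'password': '********',          # load from a secret store in practice
    'database': 'ANALYTICS',
    'schema': 'PUBLIC',
    'warehouse': 'COMPUTE_WH',
    'login_timeout': 60,
}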