id_within_dataset (int64, 46-2.71M) | snippet (string, 63-481k chars) | tokens (sequence, 20-15.6k items) | language (string, 2 classes) | nl (string, 1-32.4k chars) | is_duplicated (bool, 2 classes) |
---|---|---|---|---|---|
2,449,647 | def _get_valid_port(self, port):
"""
:param port: a port number, optionally suffixed with "/protocol".
:return: the normalized "port/protocol" string.
"""
if '/' in port:
port, protocol = port.split('/')
else:
protocol = 'tcp'
try:
int(port)
except ValueError:
raise ValueError("{0} isn't a valid port number.".format(port))
if protocol not in ('tcp', 'udp'):
raise ValueError("exposed ports only supports udp or tcp. {0} was passed".format(protocol))
return "{0}/{1}".format(port, protocol) | [
"def",
"_get_valid_port",
"(",
"self",
",",
"port",
")",
":",
"if",
"'/'",
"in",
"port",
":",
"port",
",",
"protocol",
"=",
"port",
".",
"split",
"(",
"'/'",
")",
"else",
":",
"protocol",
"=",
"'tcp'",
"try",
":",
"int",
"(",
"port",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"{0} isn't a valid port number.\"",
".",
"format",
"(",
"port",
")",
")",
"if",
"protocol",
"not",
"in",
"(",
"'tcp'",
",",
"'udp'",
")",
":",
"raise",
"ValueError",
"(",
"\"exposed ports only supports udp or tcp. {0} was passed\"",
".",
"format",
"(",
"protocol",
")",
")",
"return",
"\"{0}/{1}\"",
".",
"format",
"(",
"port",
",",
"protocol",
")"
] | python | :param port: a port number, optionally suffixed with "/protocol".
:return: the normalized "port/protocol" string. | false |
2,144,015 | def curve_intersection(c1, c2, grid=16):
'''
curve_intersection(c1, c2) yields the parametric distances (t1, t2) such that c1(t1) == c2(t2).
The optional parameter grid may specify the number of grid-points
to use in the initial search for a start-point (default: 16).
'''
from scipy.optimize import minimize
from neuropythy.geometry import segment_intersection_2D
if c1.coordinates.shape[1] > c2.coordinates.shape[1]:
(t1,t2) = curve_intersection(c2, c1, grid=grid)
return (t2,t1)
# before doing a search, see if there are literal exact intersections of the segments
x1s = c1.coordinates.T
x2s = c2.coordinates
for (ts,te,xs,xe) in zip(c1.t[:-1], c1.t[1:], x1s[:-1], x1s[1:]):
pts = segment_intersection_2D((xs,xe), (x2s[:,:-1], x2s[:,1:]))
ii = np.where(np.isfinite(pts[0]))[0]
if len(ii) > 0:
ii = ii[0]
def f(t): return np.sum((c1(t[0]) - c2(t[1]))**2)
t01 = 0.5*(ts + te)
t02 = 0.5*(c2.t[ii] + c2.t[ii+1])
(t1,t2) = minimize(f, (t01, t02)).x
return (t1,t2)
if pimms.is_vector(grid): (ts1,ts2) = [c.t[0] + (c.t[-1] - c.t[0])*grid for c in (c1,c2)]
else: (ts1,ts2) = [np.linspace(c.t[0], c.t[-1], grid) for c in (c1,c2)]
(pts1,pts2) = [c(ts) for (c,ts) in zip([c1,c2],[ts1,ts2])]
ds = np.sqrt([np.sum((pts2.T - pp)**2, axis=1) for pp in pts1.T])
(ii,jj) = np.unravel_index(np.argmin(ds), ds.shape)
(t01,t02) = (ts1[ii], ts2[jj])
ttt = []
def f(t): return np.sum((c1(t[0]) - c2(t[1]))**2)
(t1,t2) = minimize(f, (t01, t02)).x
return (t1,t2) | [
"def",
"curve_intersection",
"(",
"c1",
",",
"c2",
",",
"grid",
"=",
"16",
")",
":",
"from",
"scipy",
".",
"optimize",
"import",
"minimize",
"from",
"neuropythy",
".",
"geometry",
"import",
"segment_intersection_2D",
"if",
"c1",
".",
"coordinates",
".",
"shape",
"[",
"1",
"]",
">",
"c2",
".",
"coordinates",
".",
"shape",
"[",
"1",
"]",
":",
"(",
"t1",
",",
"t2",
")",
"=",
"curve_intersection",
"(",
"c2",
",",
"c1",
",",
"grid",
"=",
"grid",
")",
"return",
"(",
"t2",
",",
"t1",
")",
"x1s",
"=",
"c1",
".",
"coordinates",
".",
"T",
"x2s",
"=",
"c2",
".",
"coordinates",
"for",
"(",
"ts",
",",
"te",
",",
"xs",
",",
"xe",
")",
"in",
"zip",
"(",
"c1",
".",
"t",
"[",
":",
"-",
"1",
"]",
",",
"c1",
".",
"t",
"[",
"1",
":",
"]",
",",
"x1s",
"[",
":",
"-",
"1",
"]",
",",
"x1s",
"[",
"1",
":",
"]",
")",
":",
"pts",
"=",
"segment_intersection_2D",
"(",
"(",
"xs",
",",
"xe",
")",
",",
"(",
"x2s",
"[",
":",
",",
":",
"-",
"1",
"]",
",",
"x2s",
"[",
":",
",",
"1",
":",
"]",
")",
")",
"ii",
"=",
"np",
".",
"where",
"(",
"np",
".",
"isfinite",
"(",
"pts",
"[",
"0",
"]",
")",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"ii",
")",
">",
"0",
":",
"ii",
"=",
"ii",
"[",
"0",
"]",
"def",
"f",
"(",
"t",
")",
":",
"return",
"np",
".",
"sum",
"(",
"(",
"c1",
"(",
"t",
"[",
"0",
"]",
")",
"-",
"c2",
"(",
"t",
"[",
"1",
"]",
")",
")",
"**",
"2",
")",
"t01",
"=",
"0.5",
"*",
"(",
"ts",
"+",
"te",
")",
"t02",
"=",
"0.5",
"*",
"(",
"c2",
".",
"t",
"[",
"ii",
"]",
"+",
"c2",
".",
"t",
"[",
"ii",
"+",
"1",
"]",
")",
"(",
"t1",
",",
"t2",
")",
"=",
"minimize",
"(",
"f",
",",
"(",
"t01",
",",
"t02",
")",
")",
".",
"x",
"return",
"(",
"t1",
",",
"t2",
")",
"if",
"pimms",
".",
"is_vector",
"(",
"grid",
")",
":",
"(",
"ts1",
",",
"ts2",
")",
"=",
"[",
"c",
".",
"t",
"[",
"0",
"]",
"+",
"(",
"c",
".",
"t",
"[",
"-",
"1",
"]",
"-",
"c",
".",
"t",
"[",
"0",
"]",
")",
"*",
"grid",
"for",
"c",
"in",
"(",
"c1",
",",
"c2",
")",
"]",
"else",
":",
"(",
"ts1",
",",
"ts2",
")",
"=",
"[",
"np",
".",
"linspace",
"(",
"c",
".",
"t",
"[",
"0",
"]",
",",
"c",
".",
"t",
"[",
"-",
"1",
"]",
",",
"grid",
")",
"for",
"c",
"in",
"(",
"c1",
",",
"c2",
")",
"]",
"(",
"pts1",
",",
"pts2",
")",
"=",
"[",
"c",
"(",
"ts",
")",
"for",
"(",
"c",
",",
"ts",
")",
"in",
"zip",
"(",
"[",
"c1",
",",
"c2",
"]",
",",
"[",
"ts1",
",",
"ts2",
"]",
")",
"]",
"ds",
"=",
"np",
".",
"sqrt",
"(",
"[",
"np",
".",
"sum",
"(",
"(",
"pts2",
".",
"T",
"-",
"pp",
")",
"**",
"2",
",",
"axis",
"=",
"1",
")",
"for",
"pp",
"in",
"pts1",
".",
"T",
"]",
")",
"(",
"ii",
",",
"jj",
")",
"=",
"np",
".",
"unravel_index",
"(",
"np",
".",
"argmin",
"(",
"ds",
")",
",",
"ds",
".",
"shape",
")",
"(",
"t01",
",",
"t02",
")",
"=",
"(",
"ts1",
"[",
"ii",
"]",
",",
"ts2",
"[",
"jj",
"]",
")",
"ttt",
"=",
"[",
"]",
"def",
"f",
"(",
"t",
")",
":",
"return",
"np",
".",
"sum",
"(",
"(",
"c1",
"(",
"t",
"[",
"0",
"]",
")",
"-",
"c2",
"(",
"t",
"[",
"1",
"]",
")",
")",
"**",
"2",
")",
"(",
"t1",
",",
"t2",
")",
"=",
"minimize",
"(",
"f",
",",
"(",
"t01",
",",
"t02",
")",
")",
".",
"x",
"return",
"(",
"t1",
",",
"t2",
")"
] | python | curve_intersection(c1, c2) yields the parametric distances (t1, t2) such that c1(t1) == c2(t2).
The optional parameter grid may specify the number of grid-points
to use in the initial search for a start-point (default: 16). | false |
2,158,507 | def __init__(self, element_type=None, length=None, content=b''):
"""Create a HelloElemHeader with the optional parameters below.
Args:
element_type: One of OFPHET_*.
length: Length in bytes of the element, including this header,
excluding padding.
"""
super().__init__()
self.element_type = element_type
self.length = length
self.content = content | [
"def",
"__init__",
"(",
"self",
",",
"element_type",
"=",
"None",
",",
"length",
"=",
"None",
",",
"content",
"=",
"b''",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"element_type",
"=",
"element_type",
"self",
".",
"length",
"=",
"length",
"self",
".",
"content",
"=",
"content"
] | python | Create a HelloElemHeader with the optional parameters below.
Args:
element_type: One of OFPHET_*.
length: Length in bytes of the element, including this header,
excluding padding. | false |
1,637,871 | def stop(name):
'''
Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
if _service_is_upstart(name):
cmd = 'stop {0}'.format(name)
else:
cmd = '/sbin/service {0} stop'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False) | [
"def",
"stop",
"(",
"name",
")",
":",
"if",
"_service_is_upstart",
"(",
"name",
")",
":",
"cmd",
"=",
"'stop {0}'",
".",
"format",
"(",
"name",
")",
"else",
":",
"cmd",
"=",
"'/sbin/service {0} stop'",
".",
"format",
"(",
"name",
")",
"return",
"not",
"__salt__",
"[",
"'cmd.retcode'",
"]",
"(",
"cmd",
",",
"python_shell",
"=",
"False",
")"
] | python | Stop the specified service
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name> | false |
2,084,747 | def __init__(self, protocol, dataInput):
"""Initalize the Tahoma Device."""
self.__protocol = protocol
self.__raw_data = dataInput
self.__active_states = {}
debug_output = json.dumps(dataInput)
if 'label' not in dataInput.keys():
raise ValueError('No device name found: ' + debug_output)
self.__label = dataInput['label']
if 'controllableName' not in dataInput.keys():
raise ValueError('No control label name found: ' + debug_output)
self.__type = dataInput['controllableName']
if 'deviceURL' not in dataInput.keys():
raise ValueError('No control URL: ' + debug_output)
self.__url = dataInput['deviceURL']
# Parse definitions
if 'definition' not in dataInput.keys():
raise ValueError('No device definition found: ' + debug_output)
self.__definitions = {
'commands': [],
'states': []
}
definition = dataInput['definition']
if 'commands' in definition.keys():
for command in definition['commands']:
if command['commandName'] in self.__definitions['commands']:
raise ValueError("Command '" + command['commandName'] +
"' double defined - " + debug_output)
self.__definitions['commands'].append(command['commandName'])
if 'states' in definition.keys():
for state in definition['states']:
if state['qualifiedName'] in self.__definitions['states']:
raise ValueError("State '" + state['qualifiedName'] +
"' double defined - " + debug_output)
self.__definitions['states'].append(state['qualifiedName'])
# Parse active states
# calculate the amount of known active states
active_states_amount = 0
if 'states' in dataInput.keys():
for state in dataInput['states']:
active_states_amount += 1
# make sure there are not more active states than definitions
if active_states_amount > len(self.state_definitions):
raise ValueError(
"Missmatch of state definition and active states (" +
str(len(self.state_definitions)) + "/" +
str(active_states_amount) + "): " + debug_output)
if len(self.state_definitions) > 0:
if 'states' in dataInput.keys():
#raise ValueError("No active states given.")
for state in dataInput['states']:
if state['name'] not in self.state_definitions:
raise ValueError(
"Active state '" + state['name'] +
"' has not been defined: " + debug_output)
if state['name'] in self.__active_states.keys():
raise ValueError(
"Active state '" + state['name'] +
"' has been double defined: " + debug_output)
self.__active_states[state['name']] = state['value'] | [
"def",
"__init__",
"(",
"self",
",",
"protocol",
",",
"dataInput",
")",
":",
"self",
".",
"__protocol",
"=",
"protocol",
"self",
".",
"__raw_data",
"=",
"dataInput",
"self",
".",
"__active_states",
"=",
"{",
"}",
"debug_output",
"=",
"json",
".",
"dumps",
"(",
"dataInput",
")",
"if",
"'label'",
"not",
"in",
"dataInput",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'No device name found: '",
"+",
"debug_output",
")",
"self",
".",
"__label",
"=",
"dataInput",
"[",
"'label'",
"]",
"if",
"'controllableName'",
"not",
"in",
"dataInput",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'No control label name found: '",
"+",
"debug_output",
")",
"self",
".",
"__type",
"=",
"dataInput",
"[",
"'controllableName'",
"]",
"if",
"'deviceURL'",
"not",
"in",
"dataInput",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'No control URL: '",
"+",
"debug_output",
")",
"self",
".",
"__url",
"=",
"dataInput",
"[",
"'deviceURL'",
"]",
"if",
"'definition'",
"not",
"in",
"dataInput",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'No device definition found: '",
"+",
"debug_output",
")",
"self",
".",
"__definitions",
"=",
"{",
"'commands'",
":",
"[",
"]",
",",
"'states'",
":",
"[",
"]",
"}",
"definition",
"=",
"dataInput",
"[",
"'definition'",
"]",
"if",
"'commands'",
"in",
"definition",
".",
"keys",
"(",
")",
":",
"for",
"command",
"in",
"definition",
"[",
"'commands'",
"]",
":",
"if",
"command",
"[",
"'commandName'",
"]",
"in",
"self",
".",
"__definitions",
"[",
"'commands'",
"]",
":",
"raise",
"ValueError",
"(",
"\"Command '\"",
"+",
"command",
"[",
"'commandName'",
"]",
"+",
"\"' double defined - \"",
"+",
"debug_output",
")",
"self",
".",
"__definitions",
"[",
"'commands'",
"]",
".",
"append",
"(",
"command",
"[",
"'commandName'",
"]",
")",
"if",
"'states'",
"in",
"definition",
".",
"keys",
"(",
")",
":",
"for",
"state",
"in",
"definition",
"[",
"'states'",
"]",
":",
"if",
"state",
"[",
"'qualifiedName'",
"]",
"in",
"self",
".",
"__definitions",
"[",
"'states'",
"]",
":",
"raise",
"ValueError",
"(",
"\"State '\"",
"+",
"state",
"[",
"'qualifiedName'",
"]",
"+",
"\"' double defined - \"",
"+",
"debug_output",
")",
"self",
".",
"__definitions",
"[",
"'states'",
"]",
".",
"append",
"(",
"state",
"[",
"'qualifiedName'",
"]",
")",
"active_states_amount",
"=",
"0",
"if",
"'states'",
"in",
"dataInput",
".",
"keys",
"(",
")",
":",
"for",
"state",
"in",
"dataInput",
"[",
"'states'",
"]",
":",
"active_states_amount",
"+=",
"1",
"if",
"active_states_amount",
">",
"len",
"(",
"self",
".",
"state_definitions",
")",
":",
"raise",
"ValueError",
"(",
"\"Missmatch of state definition and active states (\"",
"+",
"str",
"(",
"len",
"(",
"self",
".",
"state_definitions",
")",
")",
"+",
"\"/\"",
"+",
"str",
"(",
"active_states_amount",
")",
"+",
"\"): \"",
"+",
"debug_output",
")",
"if",
"len",
"(",
"self",
".",
"state_definitions",
")",
">",
"0",
":",
"if",
"'states'",
"in",
"dataInput",
".",
"keys",
"(",
")",
":",
"for",
"state",
"in",
"dataInput",
"[",
"'states'",
"]",
":",
"if",
"state",
"[",
"'name'",
"]",
"not",
"in",
"self",
".",
"state_definitions",
":",
"raise",
"ValueError",
"(",
"\"Active state '\"",
"+",
"state",
"[",
"'name'",
"]",
"+",
"\"' has not been defined: \"",
"+",
"debug_output",
")",
"if",
"state",
"[",
"'name'",
"]",
"in",
"self",
".",
"__active_states",
".",
"keys",
"(",
")",
":",
"raise",
"ValueError",
"(",
"\"Active state '\"",
"+",
"state",
"[",
"'name'",
"]",
"+",
"\"' has been double defined: \"",
"+",
"debug_output",
")",
"self",
".",
"__active_states",
"[",
"state",
"[",
"'name'",
"]",
"]",
"=",
"state",
"[",
"'value'",
"]"
] | python | Initialize the Tahoma Device. | false |
2,049,329 | def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=False, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw) | [
"def",
"load",
"(",
"fp",
",",
"encoding",
"=",
"None",
",",
"cls",
"=",
"None",
",",
"object_hook",
"=",
"None",
",",
"parse_float",
"=",
"None",
",",
"parse_int",
"=",
"None",
",",
"parse_constant",
"=",
"None",
",",
"object_pairs_hook",
"=",
"None",
",",
"use_decimal",
"=",
"False",
",",
"**",
"kw",
")",
":",
"return",
"loads",
"(",
"fp",
".",
"read",
"(",
")",
",",
"encoding",
"=",
"encoding",
",",
"cls",
"=",
"cls",
",",
"object_hook",
"=",
"object_hook",
",",
"parse_float",
"=",
"parse_float",
",",
"parse_int",
"=",
"parse_int",
",",
"parse_constant",
"=",
"parse_constant",
",",
"object_pairs_hook",
"=",
"object_pairs_hook",
",",
"use_decimal",
"=",
"use_decimal",
",",
"**",
"kw",
")"
] | python | Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``False``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg. | false |
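The signature above mirrors simplejson's `load`; the standard-library `json.load` shares most of it (it lacks `encoding` and `use_decimal`). A hedged sketch using the stdlib, where `parse_float=Decimal` is the stdlib spelling of `use_decimal=True`:

```python
import io
import json
from decimal import Decimal

fp = io.StringIO('{"price": 9.95, "qty": 3}')
# parse_float swaps the float parser for Decimal; parse_int swaps int for float.
data = json.load(fp, parse_float=Decimal, parse_int=float)
print(data)  # {'price': Decimal('9.95'), 'qty': 3.0}
```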
2,151,964 | def __iadd__(self, target):
''' use this to combine databases '''
assert isinstance(target, RamGraphDB), 'graph databases can only be added to other graph databases'
for src, name, dst in target.list_relations():
self.store_relation(src, name, dst)
return self | [
"def",
"__iadd__",
"(",
"self",
",",
"target",
")",
":",
"assert",
"isinstance",
"(",
"target",
",",
"RamGraphDB",
")",
",",
"'graph databases can only be added to other graph databases'",
"for",
"src",
",",
"name",
",",
"dst",
"in",
"target",
".",
"list_relations",
"(",
")",
":",
"self",
".",
"store_relation",
"(",
"src",
",",
"name",
",",
"dst",
")",
"return",
"self"
] | python | use this to combine databases | false |
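The key detail in `__iadd__` is returning the mutated left-hand object so that `a += b` rebinds `a` correctly. A self-contained sketch of the same merge pattern (a toy stand-in, not the real RamGraphDB API):

```python
class TinyGraph:
    """Toy relation store illustrating the ``+=`` merge pattern above."""
    def __init__(self):
        self._relations = set()               # {(src, name, dst), ...}

    def store_relation(self, src, name, dst):
        self._relations.add((src, name, dst))

    def list_relations(self):
        return sorted(self._relations)

    def __iadd__(self, target):
        assert isinstance(target, TinyGraph), 'can only merge TinyGraph instances'
        for src, name, dst in target.list_relations():
            self.store_relation(src, name, dst)
        return self                           # must return self for ``a += b``

a, b = TinyGraph(), TinyGraph()
a.store_relation('alice', 'knows', 'bob')
b.store_relation('bob', 'knows', 'carol')
a += b
print(a.list_relations())  # [('alice', 'knows', 'bob'), ('bob', 'knows', 'carol')]
```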
2,242,917 | def overall(goback = 0, case = 1):
""" To run all over the stock and to find who match the 'case'
'goback' is back to what days ago.
0 is the last day.
"""
from twseno import twseno
for i in twseno().allstock:
#timetest(i)
try:
if case == 1:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.VOLMAX3 and a.stock_vol[-1] > 1000*1000 and a.raw_data[-1] > 10:
#print a.Cmd_display
print 'buy-: ' + oop(a)
elif a.MAO(3,6)[1] == '↓'.decode('utf-8') and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][0] <= 3:
print 'sell: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 2:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MAO(3,6)[1] == '↑'.decode('utf-8') and (a.MAO(3,6)[0][1][-1] < 0 or ( a.MAO(3,6)[0][1][-1] < 1 and a.MAO(3,6)[0][1][-1] > 0 and a.MAO(3,6)[0][1][-2] < 0 and a.MAO(3,6)[0][0] == 3)) and a.stock_vol[-1] >= 1000*1000 and a.raw_data[-1] > 10 and (sum(a.stock_vol[-45:])/45) <= 1000*1000:
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
elif case == 3:
try:
a = goristock(i)
if goback:
a.goback(goback)
if a.MA(3) > a.raw_data[-1] and a.MA(6) <= a.raw_data[-1] and a.MA(6) > a.MA(18):
#print a.Cmd_display
print 'buy-: ' + oop(a)
except KeyboardInterrupt:
print '::KeyboardInterrupt'
break
except IndexError:
print i
except KeyboardInterrupt:
print 'KeyboardInterrupt'
break | [
"def",
"overall",
"(",
"goback",
"=",
"0",
",",
"case",
"=",
"1",
")",
":",
"from",
"twseno",
"import",
"twseno",
"for",
"i",
"in",
"twseno",
"(",
")",
".",
"allstock",
":",
"try",
":",
"if",
"case",
"==",
"1",
":",
"try",
":",
"a",
"=",
"goristock",
"(",
"i",
")",
"if",
"goback",
":",
"a",
".",
"goback",
"(",
"goback",
")",
"if",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"1",
"]",
"==",
"'↑'",
".",
"decode",
"(",
"'utf-8'",
")",
"and",
"(",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
"<",
"0",
"or",
"(",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
"<",
"1",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
">",
"0",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"2",
"]",
"<",
"0",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"3",
")",
")",
"and",
"a",
".",
"VOLMAX3",
"and",
"a",
".",
"stock_vol",
"[",
"-",
"1",
"]",
">",
"1000",
"*",
"1000",
"and",
"a",
".",
"raw_data",
"[",
"-",
"1",
"]",
">",
"10",
":",
"print",
"'buy-: '",
"+",
"oop",
"(",
"a",
")",
"elif",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"1",
"]",
"==",
"'↓'",
".",
"decode",
"(",
"'utf-8'",
")",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
">",
"0",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"<=",
"3",
":",
"print",
"'sell: '",
"+",
"oop",
"(",
"a",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"'::KeyboardInterrupt'",
"break",
"except",
"IndexError",
":",
"print",
"i",
"elif",
"case",
"==",
"2",
":",
"try",
":",
"a",
"=",
"goristock",
"(",
"i",
")",
"if",
"goback",
":",
"a",
".",
"goback",
"(",
"goback",
")",
"if",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"1",
"]",
"==",
"'↑'",
".",
"decode",
"(",
"'utf-8'",
")",
"and",
"(",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
"<",
"0",
"or",
"(",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
"<",
"1",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"1",
"]",
">",
"0",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"-",
"2",
"]",
"<",
"0",
"and",
"a",
".",
"MAO",
"(",
"3",
",",
"6",
")",
"[",
"0",
"]",
"[",
"0",
"]",
"==",
"3",
")",
")",
"and",
"a",
".",
"stock_vol",
"[",
"-",
"1",
"]",
">=",
"1000",
"*",
"1000",
"and",
"a",
".",
"raw_data",
"[",
"-",
"1",
"]",
">",
"10",
"and",
"(",
"sum",
"(",
"a",
".",
"stock_vol",
"[",
"-",
"45",
":",
"]",
")",
"/",
"45",
")",
"<=",
"1000",
"*",
"1000",
":",
"print",
"'buy-: '",
"+",
"oop",
"(",
"a",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"'::KeyboardInterrupt'",
"break",
"except",
"IndexError",
":",
"print",
"i",
"elif",
"case",
"==",
"3",
":",
"try",
":",
"a",
"=",
"goristock",
"(",
"i",
")",
"if",
"goback",
":",
"a",
".",
"goback",
"(",
"goback",
")",
"if",
"a",
".",
"MA",
"(",
"3",
")",
">",
"a",
".",
"raw_data",
"[",
"-",
"1",
"]",
"and",
"a",
".",
"MA",
"(",
"6",
")",
"<=",
"a",
".",
"raw_data",
"[",
"-",
"1",
"]",
"and",
"a",
".",
"MA",
"(",
"6",
")",
">",
"a",
".",
"MA",
"(",
"18",
")",
":",
"print",
"'buy-: '",
"+",
"oop",
"(",
"a",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"'::KeyboardInterrupt'",
"break",
"except",
"IndexError",
":",
"print",
"i",
"except",
"KeyboardInterrupt",
":",
"print",
"'KeyboardInterrupt'",
"break"
] | python | Scan all stocks and report those matching the given 'case'.
'goback' is how many days to go back;
0 means the most recent day. | false |
2,148,006 | def get_default_config(self, jid, node=None):
"""
Request the default configuration of a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The default configuration of subscriptions at the node.
:rtype: :class:`~.forms.Data`
On success, the :class:`~.forms.Data` form is returned.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
raised.
"""
iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.GET)
iq.payload = pubsub_xso.Request(
pubsub_xso.Default(node=node)
)
response = yield from self.client.send(iq)
return response.payload.data | [
"def",
"get_default_config",
"(",
"self",
",",
"jid",
",",
"node",
"=",
"None",
")",
":",
"iq",
"=",
"aioxmpp",
".",
"stanza",
".",
"IQ",
"(",
"to",
"=",
"jid",
",",
"type_",
"=",
"aioxmpp",
".",
"structs",
".",
"IQType",
".",
"GET",
")",
"iq",
".",
"payload",
"=",
"pubsub_xso",
".",
"Request",
"(",
"pubsub_xso",
".",
"Default",
"(",
"node",
"=",
"node",
")",
")",
"response",
"=",
"yield",
"from",
"self",
".",
"client",
".",
"send",
"(",
"iq",
")",
"return",
"response",
".",
"payload",
".",
"data"
] | python | Request the default configuration of a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to query.
:type node: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
:return: The default configuration of subscriptions at the node.
:rtype: :class:`~.forms.Data`
On success, the :class:`~.forms.Data` form is returned.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
raised. | false |
1,986,334 | def _get_basin_depth_term(self, C, sites, period):
"""
In the case of the base model the basin depth term is switched off.
Therefore we return an array of zeros.
"""
if period < 0.65:
f_dz1 = np.zeros(len(sites.vs30), dtype=float)
else:
f_dz1 = C["f7"] + np.zeros(len(sites.vs30), dtype=float)
f_ratio = C["f7"] / C["f6"]
dz1 = (sites.z1pt0 / 1000.0) - california_basin_model(sites.vs30)
idx = dz1 <= f_ratio
f_dz1[idx] = C["f6"] * dz1[idx]
return f_dz1 | [
"def",
"_get_basin_depth_term",
"(",
"self",
",",
"C",
",",
"sites",
",",
"period",
")",
":",
"if",
"period",
"<",
"0.65",
":",
"f_dz1",
"=",
"np",
".",
"zeros",
"(",
"len",
"(",
"sites",
".",
"vs30",
")",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"f_dz1",
"=",
"C",
"[",
"\"f7\"",
"]",
"+",
"np",
".",
"zeros",
"(",
"len",
"(",
"sites",
".",
"vs30",
")",
",",
"dtype",
"=",
"float",
")",
"f_ratio",
"=",
"C",
"[",
"\"f7\"",
"]",
"/",
"C",
"[",
"\"f6\"",
"]",
"dz1",
"=",
"(",
"sites",
".",
"z1pt0",
"/",
"1000.0",
")",
"-",
"california_basin_model",
"(",
"sites",
".",
"vs30",
")",
"idx",
"=",
"dz1",
"<=",
"f_ratio",
"f_dz1",
"[",
"idx",
"]",
"=",
"C",
"[",
"\"f6\"",
"]",
"*",
"dz1",
"[",
"idx",
"]",
"return",
"f_dz1"
] | python | In the case of the base model the basin depth term is switched off.
Therefore we return an array of zeros. | false |
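Read as a formula, the branching above caps a linear function of the depth residual (an interpretation of the code, assuming `C["f6"] > 0`; in the short-period branch the baseline is zero rather than `f7`):

```latex
% Long-period branch (T >= 0.65 s):
f_{\Delta z_1} = \min\bigl(f_6\,\Delta z_1,\; f_7\bigr),
\qquad
\Delta z_1 = \frac{z_{1.0}}{1000} - z_{1,\mathrm{ref}}(V_{S30})
```

where `z_{1,ref}` is the reference depth predicted by `california_basin_model` from Vs30.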
2,310,547 | def complete(self, match, subject_graph):
"""Check the completeness of the ring match"""
if not CustomPattern.complete(self, match, subject_graph):
return False
if self.strong:
# If the ring is not strong, return False
if self.size % 2 == 0:
# even ring
for i in range(self.size//2):
vertex1_start = match.forward[i]
vertex1_stop = match.forward[(i+self.size//2)%self.size]
paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
if len(paths) != 2:
#print "Even ring must have two paths between opposite vertices"
return False
for path in paths:
if len(path) != self.size//2+1:
#print "Paths between opposite vertices must half the size of the ring+1"
return False
else:
# odd ring
for i in range(self.size//2+1):
vertex1_start = match.forward[i]
vertex1_stop = match.forward[(i+self.size//2)%self.size]
paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
if len(paths) > 1:
return False
if len(paths[0]) != self.size//2+1:
return False
vertex1_stop = match.forward[(i+self.size//2+1)%self.size]
paths = list(subject_graph.iter_shortest_paths(vertex1_start, vertex1_stop))
if len(paths) > 1:
return False
if len(paths[0]) != self.size//2+1:
return False
return True | [
"def",
"complete",
"(",
"self",
",",
"match",
",",
"subject_graph",
")",
":",
"if",
"not",
"CustomPattern",
".",
"complete",
"(",
"self",
",",
"match",
",",
"subject_graph",
")",
":",
"return",
"False",
"if",
"self",
".",
"strong",
":",
"if",
"self",
".",
"size",
"%",
"2",
"==",
"0",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
"//",
"2",
")",
":",
"vertex1_start",
"=",
"match",
".",
"forward",
"[",
"i",
"]",
"vertex1_stop",
"=",
"match",
".",
"forward",
"[",
"(",
"i",
"+",
"self",
".",
"size",
"//",
"2",
")",
"%",
"self",
".",
"size",
"]",
"paths",
"=",
"list",
"(",
"subject_graph",
".",
"iter_shortest_paths",
"(",
"vertex1_start",
",",
"vertex1_stop",
")",
")",
"if",
"len",
"(",
"paths",
")",
"!=",
"2",
":",
"return",
"False",
"for",
"path",
"in",
"paths",
":",
"if",
"len",
"(",
"path",
")",
"!=",
"self",
".",
"size",
"//",
"2",
"+",
"1",
":",
"return",
"False",
"else",
":",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"size",
"//",
"2",
"+",
"1",
")",
":",
"vertex1_start",
"=",
"match",
".",
"forward",
"[",
"i",
"]",
"vertex1_stop",
"=",
"match",
".",
"forward",
"[",
"(",
"i",
"+",
"self",
".",
"size",
"//",
"2",
")",
"%",
"self",
".",
"size",
"]",
"paths",
"=",
"list",
"(",
"subject_graph",
".",
"iter_shortest_paths",
"(",
"vertex1_start",
",",
"vertex1_stop",
")",
")",
"if",
"len",
"(",
"paths",
")",
">",
"1",
":",
"return",
"False",
"if",
"len",
"(",
"paths",
"[",
"0",
"]",
")",
"!=",
"self",
".",
"size",
"//",
"2",
"+",
"1",
":",
"return",
"False",
"vertex1_stop",
"=",
"match",
".",
"forward",
"[",
"(",
"i",
"+",
"self",
".",
"size",
"//",
"2",
"+",
"1",
")",
"%",
"self",
".",
"size",
"]",
"paths",
"=",
"list",
"(",
"subject_graph",
".",
"iter_shortest_paths",
"(",
"vertex1_start",
",",
"vertex1_stop",
")",
")",
"if",
"len",
"(",
"paths",
")",
">",
"1",
":",
"return",
"False",
"if",
"len",
"(",
"paths",
"[",
"0",
"]",
")",
"!=",
"self",
".",
"size",
"//",
"2",
"+",
"1",
":",
"return",
"False",
"return",
"True"
] | python | Check the completeness of the ring match | false |
1,655,686 | def send_command(self, *args, **kwargs):
"""
Send command to network device; retrieve output until router_prompt or expect_string.
By default this method will keep waiting to receive data until the network device prompt is
detected. The current network device prompt will be determined automatically.
command_string = command to execute
expect_string = pattern to search for uses re.search (use raw strings)
delay_factor = decrease the initial delay before we start looking for data
max_loops = number of iterations before we give up and raise an exception
strip_prompt = strip the trailing prompt from the output
strip_command = strip the leading command from the output
"""
if len(args) >= 2:
expect_string = args[1]
else:
expect_string = kwargs.get("expect_string")
if expect_string is None:
expect_string = r"(OK|ERROR|Command not recognized\.)"
expect_string = self.RETURN + expect_string + self.RETURN
kwargs.setdefault("expect_string", expect_string)
output = super(CiscoSSHConnection, self).send_command(*args, **kwargs)
return output | [
"def",
"send_command",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"len",
"(",
"args",
")",
">=",
"2",
":",
"expect_string",
"=",
"args",
"[",
"1",
"]",
"else",
":",
"expect_string",
"=",
"kwargs",
".",
"get",
"(",
"\"expect_string\"",
")",
"if",
"expect_string",
"is",
"None",
":",
"expect_string",
"=",
"r\"(OK|ERROR|Command not recognized\\.)\"",
"expect_string",
"=",
"self",
".",
"RETURN",
"+",
"expect_string",
"+",
"self",
".",
"RETURN",
"kwargs",
".",
"setdefault",
"(",
"\"expect_string\"",
",",
"expect_string",
")",
"output",
"=",
"super",
"(",
"CiscoSSHConnection",
",",
"self",
")",
".",
"send_command",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"output"
] | python | Send command to network device; retrieve output until router_prompt or expect_string.
By default this method will keep waiting to receive data until the network device prompt is
detected. The current network device prompt will be determined automatically.
command_string = command to execute
expect_string = pattern to search for uses re.search (use raw strings)
delay_factor = decrease the initial delay before we start looking for data
max_loops = number of iterations before we give up and raise an exception
strip_prompt = strip the trailing prompt from the output
strip_command = strip the leading command from the output | false |
2,169,025 | def stopService(self):
"""
Gracefully stop the service.
Returns:
defer.Deferred: a Deferred which is triggered when the service has
finished shutting down.
"""
self._service.factory.stopTrying()
yield self._service.factory.stopFactory()
yield service.MultiService.stopService(self) | [
"def",
"stopService",
"(",
"self",
")",
":",
"self",
".",
"_service",
".",
"factory",
".",
"stopTrying",
"(",
")",
"yield",
"self",
".",
"_service",
".",
"factory",
".",
"stopFactory",
"(",
")",
"yield",
"service",
".",
"MultiService",
".",
"stopService",
"(",
"self",
")"
] | python | Gracefully stop the service.
Returns:
defer.Deferred: a Deferred which is triggered when the service has
finished shutting down. | false |
2,250,747 | def __init__(self, meta=None, coordinator=None):
"""The future associated to a submitted transfer request
:type meta: TransferMeta
:param meta: The metadata associated to the request. This object
is visible to the requester.
:type coordinator: TransferCoordinator
:param coordinator: The coordinator associated to the request. This
object is not visible to the requester.
"""
self._meta = meta
self._coordinator = coordinator | [
"def",
"__init__",
"(",
"self",
",",
"meta",
"=",
"None",
",",
"coordinator",
"=",
"None",
")",
":",
"self",
".",
"_meta",
"=",
"meta",
"self",
".",
"_coordinator",
"=",
"coordinator"
] | python | The future associated with a submitted transfer request.
:type meta: TransferMeta
:param meta: The metadata associated with the request. This object
is visible to the requester.
:type coordinator: TransferCoordinator
:param coordinator: The coordinator associated with the request. This
object is not visible to the requester. | false |
2,547,572 | def snmp_server_user_username(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
user = ET.SubElement(snmp_server, "user")
username = ET.SubElement(user, "username")
username.text = kwargs.pop('username')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"snmp_server_user_username",
"(",
"self",
",",
"**",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"snmp_server",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"snmp-server\"",
",",
"xmlns",
"=",
"\"urn:brocade.com:mgmt:brocade-snmp\"",
")",
"user",
"=",
"ET",
".",
"SubElement",
"(",
"snmp_server",
",",
"\"user\"",
")",
"username",
"=",
"ET",
".",
"SubElement",
"(",
"user",
",",
"\"username\"",
")",
"username",
".",
"text",
"=",
"kwargs",
".",
"pop",
"(",
"'username'",
")",
"callback",
"=",
"kwargs",
".",
"pop",
"(",
"'callback'",
",",
"self",
".",
"_callback",
")",
"return",
"callback",
"(",
"config",
")"
] | python | Auto Generated Code | false |
2,458,274 | def log(x, base=None):
""" log(x, base=e)
Logarithmic function.
"""
_math = infer_math(x)
if base is None:
return _math.log(x)
elif _math == math:
return _math.log(x, base)
else:
# numpy has no option to set a base
return _math.log(x) / _math.log(base) | [
"def",
"log",
"(",
"x",
",",
"base",
"=",
"None",
")",
":",
"_math",
"=",
"infer_math",
"(",
"x",
")",
"if",
"base",
"is",
"None",
":",
"return",
"_math",
".",
"log",
"(",
"x",
")",
"elif",
"_math",
"==",
"math",
":",
"return",
"_math",
".",
"log",
"(",
"x",
",",
"base",
")",
"else",
":",
"return",
"_math",
".",
"log",
"(",
"x",
")",
"/",
"_math",
".",
"log",
"(",
"base",
")"
] | python | log(x, base=e)
Logarithmic function. | false |
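The `base` handling exists because `math.log` accepts a base directly while NumPy's `np.log` does not, so the fallback applies the change-of-base identity `log_b(x) = ln(x) / ln(b)`. A quick check:

```python
import math
import numpy as np

print(math.log(8, 2))                             # 3.0  (base passed directly)
print(np.log(np.array([8.0, 16.0])) / np.log(2))  # [3. 4.]  (change of base)
```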
1,904,750 | def raw_decrypt(self, ciphertext):
"""Decrypt raw ciphertext and return raw plaintext.
Args:
ciphertext (int): (usually from :meth:`EncryptedNumber.ciphertext()`)
that is to be Paillier decrypted.
Returns:
int: Paillier decryption of ciphertext. This is a positive
integer < :attr:`public_key.n`.
Raises:
TypeError: if ciphertext is not an int.
"""
if not isinstance(ciphertext, int):
raise TypeError('Expected ciphertext to be an int, not: %s' %
type(ciphertext))
decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p
decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q
return self.crt(decrypt_to_p, decrypt_to_q) | [
"def",
"raw_decrypt",
"(",
"self",
",",
"ciphertext",
")",
":",
"if",
"not",
"isinstance",
"(",
"ciphertext",
",",
"int",
")",
":",
"raise",
"TypeError",
"(",
"'Expected ciphertext to be an int, not: %s'",
"%",
"type",
"(",
"ciphertext",
")",
")",
"decrypt_to_p",
"=",
"self",
".",
"l_function",
"(",
"powmod",
"(",
"ciphertext",
",",
"self",
".",
"p",
"-",
"1",
",",
"self",
".",
"psquare",
")",
",",
"self",
".",
"p",
")",
"*",
"self",
".",
"hp",
"%",
"self",
".",
"p",
"decrypt_to_q",
"=",
"self",
".",
"l_function",
"(",
"powmod",
"(",
"ciphertext",
",",
"self",
".",
"q",
"-",
"1",
",",
"self",
".",
"qsquare",
")",
",",
"self",
".",
"q",
")",
"*",
"self",
".",
"hq",
"%",
"self",
".",
"q",
"return",
"self",
".",
"crt",
"(",
"decrypt_to_p",
",",
"decrypt_to_q",
")"
] | python | Decrypt raw ciphertext and return raw plaintext.
Args:
ciphertext (int): (usually from :meth:`EncryptedNumber.ciphertext()`)
that is to be Paillier decrypted.
Returns:
int: Paillier decryption of ciphertext. This is a positive
integer < :attr:`public_key.n`.
Raises:
TypeError: if ciphertext is not an int. | false |
2,532,952 | def mutator(*cache_names):
"""Decorator for ``Document`` methods that change the document.
This decorator ensures that the object's caches are kept in sync
when changes are made.
"""
def deco(fn):
@wraps(fn)
def _fn(self, *args, **kwargs):
try:
return fn(self, *args, **kwargs)
finally:
for cache_name in cache_names:
setattr(self, cache_name, None)
return _fn
return deco | [
"def",
"mutator",
"(",
"*",
"cache_names",
")",
":",
"def",
"deco",
"(",
"fn",
")",
":",
"@",
"wraps",
"(",
"fn",
")",
"def",
"_fn",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"try",
":",
"return",
"fn",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"finally",
":",
"for",
"cache_name",
"in",
"cache_names",
":",
"setattr",
"(",
"self",
",",
"cache_name",
",",
"None",
")",
"return",
"_fn",
"return",
"deco"
] | python | Decorator for ``Document`` methods that change the document.
This decorator ensures that the object's caches are kept in sync
when changes are made. | false |
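A minimal usage sketch (hypothetical `Doc` class; assumes `from functools import wraps` accompanies the definition above): each mutating method resets the named cache attributes, so a lazy property recomputes on the next access.

```python
from functools import wraps  # required by the ``mutator`` definition above

class Doc:
    def __init__(self):
        self.words = []
        self._word_count = None           # cache slot, lazily filled

    @property
    def word_count(self):
        if self._word_count is None:      # recompute on cache miss
            self._word_count = len(self.words)
        return self._word_count

    @mutator('_word_count')               # resets the cache after each call
    def add_word(self, word):
        self.words.append(word)

d = Doc()
d.add_word('hello')
print(d.word_count)                       # 1
d.add_word('world')                       # cache invalidated by @mutator
print(d.word_count)                       # 2 (recomputed, not stale)
```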
2,595,123 | def import_data_dir(target_zip):
"""
Imports the data specified by param <target_zip>. Renames the data dir if it already exists and
unpacks the zip sub dir __data__ directly within the current active product.
:param target_zip: string path to the zip file.
"""
from django_productline.context import PRODUCT_CONTEXT
new_data_dir = '{data_dir}_before_import_{ts}'.format(
data_dir=PRODUCT_CONTEXT.DATA_DIR,
ts=datetime.datetime.now().strftime("%Y-%m-%d.%H:%M:%S:%s")
)
if os.path.exists(PRODUCT_CONTEXT.DATA_DIR):
# rename an existing data dir if it exists
tasks.mv_data_dir(new_data_dir)
z = zipfile.ZipFile(target_zip)
def filter_func(x):
return x.startswith('__data__/')
z.extractall(os.path.dirname(PRODUCT_CONTEXT.DATA_DIR), filter(filter_func, z.namelist())) | [
"def",
"import_data_dir",
"(",
"target_zip",
")",
":",
"from",
"django_productline",
".",
"context",
"import",
"PRODUCT_CONTEXT",
"new_data_dir",
"=",
"'{data_dir}_before_import_{ts}'",
".",
"format",
"(",
"data_dir",
"=",
"PRODUCT_CONTEXT",
".",
"DATA_DIR",
",",
"ts",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d.%H:%M:%S:%s\"",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"PRODUCT_CONTEXT",
".",
"DATA_DIR",
")",
":",
"tasks",
".",
"mv_data_dir",
"(",
"new_data_dir",
")",
"z",
"=",
"zipfile",
".",
"ZipFile",
"(",
"target_zip",
")",
"def",
"filter_func",
"(",
"x",
")",
":",
"return",
"x",
".",
"startswith",
"(",
"'__data__/'",
")",
"z",
".",
"extractall",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"PRODUCT_CONTEXT",
".",
"DATA_DIR",
")",
",",
"filter",
"(",
"filter_func",
",",
"z",
".",
"namelist",
"(",
")",
")",
")"
] | python | Imports the data specified by param <target_zip>. Renames the data dir if it already exists and
unpacks the zip sub dir __data__ directly within the current active product.
:param target_zip: string path to the zip file. | false |
2,594,854 | def automain(module, *, args=(), kwargs=None):
'''
This decorator automatically invokes a function if the module is being run
as the "__main__" module. Optionally, provide args or kwargs with which to
call the function. If `module` is "__main__", the function is called, and
the program is `sys.exit`ed with the return value. You can also pass `True`
to cause the function to be called unconditionally. If the function is not
called, it is returned unchanged by the decorator.
Usage:
@automain(__name__) # Pass __name__ to check __name__=="__main__"
def main():
...
If __name__ is "__main__" here, the main function is called, and then
sys.exit called with the return value.
'''
# Check that @automain(...) was called, rather than @automain
if callable(module):
raise AutomainRequiresModuleError(module)
if module == '__main__' or module is True:
if kwargs is None:
kwargs = {}
# Use a function definition instead of a lambda for a neater traceback
def automain_decorator(main):
sys.exit(main(*args, **kwargs))
return automain_decorator
else:
return lambda main: main | [
"def",
"automain",
"(",
"module",
",",
"*",
",",
"args",
"=",
"(",
")",
",",
"kwargs",
"=",
"None",
")",
":",
"if",
"callable",
"(",
"module",
")",
":",
"raise",
"AutomainRequiresModuleError",
"(",
"module",
")",
"if",
"module",
"==",
"'__main__'",
"or",
"module",
"is",
"True",
":",
"if",
"kwargs",
"is",
"None",
":",
"kwargs",
"=",
"{",
"}",
"def",
"automain_decorator",
"(",
"main",
")",
":",
"sys",
".",
"exit",
"(",
"main",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
")",
"return",
"automain_decorator",
"else",
":",
"return",
"lambda",
"main",
":",
"main"
] | python | This decorator automatically invokes a function if the module is being run
as the "__main__" module. Optionally, provide args or kwargs with which to
call the function. If `module` is "__main__", the function is called, and
the program is `sys.exit`ed with the return value. You can also pass `True`
to cause the function to be called unconditionally. If the function is not
called, it is returned unchanged by the decorator.
Usage:
@automain(__name__) # Pass __name__ to check __name__=="__main__"
def main():
...
If __name__ is "__main__" here, the main function is called, and then
sys.exit called with the return value. | false |
2,162,120 | def get_info(df, group, info=['mean', 'std']):
"""
Aggregate mean and std with the given group.
"""
agg = df.groupby(group).agg(info)
agg.columns = agg.columns.droplevel(0)
return agg | [
"def",
"get_info",
"(",
"df",
",",
"group",
",",
"info",
"=",
"[",
"'mean'",
",",
"'std'",
"]",
")",
":",
"agg",
"=",
"df",
".",
"groupby",
"(",
"group",
")",
".",
"agg",
"(",
"info",
")",
"agg",
".",
"columns",
"=",
"agg",
".",
"columns",
".",
"droplevel",
"(",
"0",
")",
"return",
"agg"
] | python | Aggregate mean and std with the given group. | false |
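A small worked example with hypothetical data: `agg(['mean', 'std'])` yields a column MultiIndex such as `('value', 'mean')`, and `droplevel(0)` flattens it to plain `mean`/`std` labels.

```python
import pandas as pd

df = pd.DataFrame({'group': ['a', 'a', 'b', 'b'],
                   'value': [1.0, 3.0, 2.0, 4.0]})
print(get_info(df, 'group'))
#        mean       std
# group
# a       2.0  1.414214
# b       3.0  1.414214
```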
2,404,918 | def store_report(self, payload):
"""
Save the crash report to a file, keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report is always named 01.
"""
offline_reports = self.get_offline_reports()
if offline_reports:
# Increment the name of all existing reports 1 --> 2, 2 --> 3 etc.
for ii, report in enumerate(reversed(offline_reports)):
rpath, ext = os.path.splitext(report)
n = int(re.findall(r'(\d+)', rpath)[-1])
new_name = os.path.join(self.report_dir, self._report_name % (n + 1)) + ext
shutil.copy2(report, new_name)
os.remove(report)
# Delete the oldest report
if len(offline_reports) >= self.offline_report_limit:
oldest = glob.glob(os.path.join(self.report_dir, self._report_name % (self.offline_report_limit+1) + '*'))[0]
os.remove(oldest)
new_report_path = os.path.join(self.report_dir, self._report_name % 1 + '.json')
# Write a new report
with open(new_report_path, 'w') as _f:
json.dump(payload, _f)
return new_report_path | [
"def",
"store_report",
"(",
"self",
",",
"payload",
")",
":",
"offline_reports",
"=",
"self",
".",
"get_offline_reports",
"(",
")",
"if",
"offline_reports",
":",
"for",
"ii",
",",
"report",
"in",
"enumerate",
"(",
"reversed",
"(",
"offline_reports",
")",
")",
":",
"rpath",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"report",
")",
"n",
"=",
"int",
"(",
"re",
".",
"findall",
"(",
"'(\\d+)'",
",",
"rpath",
")",
"[",
"-",
"1",
"]",
")",
"new_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"report_dir",
",",
"self",
".",
"_report_name",
"%",
"(",
"n",
"+",
"1",
")",
")",
"+",
"ext",
"shutil",
".",
"copy2",
"(",
"report",
",",
"new_name",
")",
"os",
".",
"remove",
"(",
"report",
")",
"if",
"len",
"(",
"offline_reports",
")",
">=",
"self",
".",
"offline_report_limit",
":",
"oldest",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"report_dir",
",",
"self",
".",
"_report_name",
"%",
"(",
"self",
".",
"offline_report_limit",
"+",
"1",
")",
"+",
"'*'",
")",
")",
"[",
"0",
"]",
"os",
".",
"remove",
"(",
"oldest",
")",
"new_report_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"report_dir",
",",
"self",
".",
"_report_name",
"%",
"1",
"+",
"'.json'",
")",
"with",
"open",
"(",
"new_report_path",
",",
"'w'",
")",
"as",
"_f",
":",
"json",
".",
"dump",
"(",
"payload",
",",
"_f",
")",
"return",
"new_report_path"
] | python | Save the crash report to a file, keeping the last `offline_report_limit` files in a cyclical FIFO buffer.
The newest crash report is always named 01. | false |
2,617,574 | def __init__(self, generator, weak_generator=None, *args, **kwargs):
"""__init__
:type generator: generator
:param generator: The generator object.
:type weak_generator: weakref.ref
:param weak_generator: Weak reference to a generator. Optional.
For other parameters see :meth:`WeakGeneratorWrapper.__init__`.
"""
# It's important that the weak_generator object reference is preserved
# because it will hold `finalize_callback` from @send_self.
self.generator = generator
if weak_generator is None:
weak_generator = weakref.ref(generator)
super(StrongGeneratorWrapper, self).__init__(weak_generator, *args,
**kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"generator",
",",
"weak_generator",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"self",
".",
"generator",
"=",
"generator",
"if",
"weak_generator",
"is",
"None",
":",
"weak_generator",
"=",
"weakref",
".",
"ref",
"(",
"generator",
")",
"super",
"(",
"StrongGeneratorWrapper",
",",
"self",
")",
".",
"__init__",
"(",
"weak_generator",
",",
"*",
"args",
",",
"**",
"kwargs",
")"
] | python | __init__
:type generator: generator
:param generator: The generator object.
:type weak_generator: weakref.ref
:param weak_generator: Weak reference to a generator. Optional.
For other parameters see :meth:`WeakGeneratorWrapper.__init__`. | false |
2,626,635 | def publish(self, value):
"""
Accepts: list of tuples in the format (ip, port)
Returns: unicode
"""
if not isinstance(value, list):
raise ValueError(value)
slaves = ['%s:%d' % x for x in value]
return unicode(", ".join(slaves)) | [
"def",
"publish",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"raise",
"ValueError",
"(",
"value",
")",
"slaves",
"=",
"[",
"'%s:%d'",
"%",
"x",
"for",
"x",
"in",
"value",
"]",
"return",
"unicode",
"(",
"\", \"",
".",
"join",
"(",
"slaves",
")",
")"
] | python | Accepts: list of tuples in the format (ip, port)
Returns: unicode | false |
2,103,112 | def _format_arguments(ctx):
"""Format all `click.Argument` for a `click.Command`."""
params = [x for x in ctx.command.params if isinstance(x, click.Argument)]
for param in params:
for line in _format_argument(param):
yield line
yield '' | [
"def",
"_format_arguments",
"(",
"ctx",
")",
":",
"params",
"=",
"[",
"x",
"for",
"x",
"in",
"ctx",
".",
"command",
".",
"params",
"if",
"isinstance",
"(",
"x",
",",
"click",
".",
"Argument",
")",
"]",
"for",
"param",
"in",
"params",
":",
"for",
"line",
"in",
"_format_argument",
"(",
"param",
")",
":",
"yield",
"line",
"yield",
"''"
] | python | Format all `click.Argument` for a `click.Command`. | false |
2,291,624 | def _validate_config(self):
""" ensure REQUIRED_CONFIG_KEYS are filled """
# exit if no backend specified
if not self.backend:
return
# exit if no required config keys
if len(self.REQUIRED_CONFIG_KEYS) < 1:
return
self.config = self.config or {} # default to empty dict of no config
required_keys_set = set(self.REQUIRED_CONFIG_KEYS)
config_keys_set = set(self.config.keys())
missing_required_keys = required_keys_set - config_keys_set
unrecognized_keys = config_keys_set - required_keys_set
# if any missing required key raise ValidationError
if len(missing_required_keys) > 0:
# converts list in comma separated string
missing_keys_string = ', '.join(missing_required_keys)
# django error
raise ValidationError(_('Missing required config keys: "%s"') % missing_keys_string)
elif len(unrecognized_keys) > 0:
# converts list in comma separated string
unrecognized_keys_string = ', '.join(unrecognized_keys)
# django error
raise ValidationError(_('Unrecognized config keys: "%s"') % unrecognized_keys_string) | [
"def",
"_validate_config",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"backend",
":",
"return",
"if",
"len",
"(",
"self",
".",
"REQUIRED_CONFIG_KEYS",
")",
"<",
"1",
":",
"return",
"self",
".",
"config",
"=",
"self",
".",
"config",
"or",
"{",
"}",
"required_keys_set",
"=",
"set",
"(",
"self",
".",
"REQUIRED_CONFIG_KEYS",
")",
"config_keys_set",
"=",
"set",
"(",
"self",
".",
"config",
".",
"keys",
"(",
")",
")",
"missing_required_keys",
"=",
"required_keys_set",
"-",
"config_keys_set",
"unrecognized_keys",
"=",
"config_keys_set",
"-",
"required_keys_set",
"if",
"len",
"(",
"missing_required_keys",
")",
">",
"0",
":",
"missing_keys_string",
"=",
"', '",
".",
"join",
"(",
"missing_required_keys",
")",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Missing required config keys: \"%s\"'",
")",
"%",
"missing_keys_string",
")",
"elif",
"len",
"(",
"unrecognized_keys",
")",
">",
"0",
":",
"unrecognized_keys_string",
"=",
"', '",
".",
"join",
"(",
"unrecognized_keys",
")",
"raise",
"ValidationError",
"(",
"_",
"(",
"'Unrecognized config keys: \"%s\"'",
")",
"%",
"unrecognized_keys_string",
")"
] | python | ensure REQUIRED_CONFIG_KEYS are filled | false |
1,581,480 | def feature_importances_(self):
"""
Feature importances property
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Returns
-------
feature_importances_ : array of shape ``[n_features]``
"""
if getattr(self, 'booster', None) is not None and self.booster != 'gbtree':
raise AttributeError('Feature importance is not defined for Booster type {}'
.format(self.booster))
b = self.get_booster()
score = b.get_score(importance_type=self.importance_type)
all_features = [score.get(f, 0.) for f in b.feature_names]
all_features = np.array(all_features, dtype=np.float32)
return all_features / all_features.sum() | [
"def",
"feature_importances_",
"(",
"self",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'booster'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"self",
".",
"booster",
"!=",
"'gbtree'",
":",
"raise",
"AttributeError",
"(",
"'Feature importance is not defined for Booster type {}'",
".",
"format",
"(",
"self",
".",
"booster",
")",
")",
"b",
"=",
"self",
".",
"get_booster",
"(",
")",
"score",
"=",
"b",
".",
"get_score",
"(",
"importance_type",
"=",
"self",
".",
"importance_type",
")",
"all_features",
"=",
"[",
"score",
".",
"get",
"(",
"f",
",",
"0.",
")",
"for",
"f",
"in",
"b",
".",
"feature_names",
"]",
"all_features",
"=",
"np",
".",
"array",
"(",
"all_features",
",",
"dtype",
"=",
"np",
".",
"float32",
")",
"return",
"all_features",
"/",
"all_features",
".",
"sum",
"(",
")"
] | python | Feature importances property
.. note:: Feature importance is defined only for tree boosters
Feature importance is only defined when the decision tree model is chosen as base
learner (`booster=gbtree`). It is not defined for other base learner types, such
as linear learners (`booster=gblinear`).
Returns
-------
feature_importances_ : array of shape ``[n_features]`` | false |
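A hedged usage sketch (assumes `xgboost` and `scikit-learn` are installed): the property returns one weight per input feature, normalized so the weights sum to 1.0.

```python
from sklearn.datasets import load_iris
from xgboost import XGBClassifier

X, y = load_iris(return_X_y=True)
model = XGBClassifier(n_estimators=10).fit(X, y)
print(model.feature_importances_)        # four weights, one per iris feature
print(model.feature_importances_.sum())  # 1.0
```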
2,039,147 | def __enter__(self):
"""Open MemoryFile, write data and return."""
self.fio_memfile = MemoryFile()
with self.fio_memfile.open(
schema=self.schema,
driver=self.driver,
crs=self.tile.crs
) as dst:
dst.writerecords(self.features)
return self.fio_memfile | [
"def",
"__enter__",
"(",
"self",
")",
":",
"self",
".",
"fio_memfile",
"=",
"MemoryFile",
"(",
")",
"with",
"self",
".",
"fio_memfile",
".",
"open",
"(",
"schema",
"=",
"self",
".",
"schema",
",",
"driver",
"=",
"self",
".",
"driver",
",",
"crs",
"=",
"self",
".",
"tile",
".",
"crs",
")",
"as",
"dst",
":",
"dst",
".",
"writerecords",
"(",
"self",
".",
"features",
")",
"return",
"self",
".",
"fio_memfile"
] | python | Open MemoryFile, write data and return. | false |
2,408,247 | def retrieve(self, id) :
"""
Retrieve a single order.
Returns a single order available to the user, according to the unique order ID provided.
If the specified order does not exist, the request will return an error.
:calls: ``get /orders/{id}``
:param int id: Unique identifier of an Order.
:return: Dictionary that supports attribute-style access and represents the Order resource.
:rtype: dict
"""
_, _, order = self.http_client.get("/orders/{id}".format(id=id))
return order | [
"def",
"retrieve",
"(",
"self",
",",
"id",
")",
":",
"_",
",",
"_",
",",
"order",
"=",
"self",
".",
"http_client",
".",
"get",
"(",
"\"/orders/{id}\"",
".",
"format",
"(",
"id",
"=",
"id",
")",
")",
"return",
"order"
] | python | Retrieve a single order.
Returns a single order available to the user, according to the unique order ID provided.
If the specified order does not exist, the request will return an error.
:calls: ``get /orders/{id}``
:param int id: Unique identifier of an Order.
:return: Dictionary that supports attribute-style access and represents the Order resource.
:rtype: dict | false |
1,926,585 | def reindex(args):
"""
%prog agpfile
Assume the component line order is correct and modify coordinates; this is
necessary mostly due to manual edits (insert/delete) that disrupt
the target coordinates.
"""
p = OptionParser(reindex.__doc__)
p.add_option("--nogaps", default=False, action="store_true",
help="Remove all gap lines [default: %default]")
p.add_option("--inplace", default=False, action="store_true",
help="Replace input file [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
agpfile, = args
inplace = opts.inplace
agp = AGP(agpfile, validate=False)
pf = agpfile.rsplit(".", 1)[0]
newagpfile = pf + ".reindexed.agp"
fw = open(newagpfile, "w")
agp.transfer_header(fw)
for chr, chr_agp in groupby(agp, lambda x: x.object):
chr_agp = list(chr_agp)
object_beg = 1
for i, b in enumerate(chr_agp):
b.object_beg = object_beg
b.part_number = i + 1
if opts.nogaps and b.is_gap:
continue
if b.is_gap:
b.object_end = object_beg + b.gap_length - 1
else:
b.object_end = object_beg + b.component_span - 1
object_beg = b.object_end + 1
print(str(b), file=fw)
# Last step: validate the new agpfile
fw.close()
agp = AGP(newagpfile, validate=True)
if inplace:
shutil.move(newagpfile, agpfile)
logging.debug("Rename file `{0}` to `{1}`".format(newagpfile, agpfile))
newagpfile = agpfile
return newagpfile | [
"def",
"reindex",
"(",
"args",
")",
":",
"p",
"=",
"OptionParser",
"(",
"reindex",
".",
"__doc__",
")",
"p",
".",
"add_option",
"(",
"\"--nogaps\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Remove all gap lines [default: %default]\"",
")",
"p",
".",
"add_option",
"(",
"\"--inplace\"",
",",
"default",
"=",
"False",
",",
"action",
"=",
"\"store_true\"",
",",
"help",
"=",
"\"Replace input file [default: %default]\"",
")",
"opts",
",",
"args",
"=",
"p",
".",
"parse_args",
"(",
"args",
")",
"if",
"len",
"(",
"args",
")",
"!=",
"1",
":",
"sys",
".",
"exit",
"(",
"p",
".",
"print_help",
"(",
")",
")",
"agpfile",
",",
"=",
"args",
"inplace",
"=",
"opts",
".",
"inplace",
"agp",
"=",
"AGP",
"(",
"agpfile",
",",
"validate",
"=",
"False",
")",
"pf",
"=",
"agpfile",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"[",
"0",
"]",
"newagpfile",
"=",
"pf",
"+",
"\".reindexed.agp\"",
"fw",
"=",
"open",
"(",
"newagpfile",
",",
"\"w\"",
")",
"agp",
".",
"transfer_header",
"(",
"fw",
")",
"for",
"chr",
",",
"chr_agp",
"in",
"groupby",
"(",
"agp",
",",
"lambda",
"x",
":",
"x",
".",
"object",
")",
":",
"chr_agp",
"=",
"list",
"(",
"chr_agp",
")",
"object_beg",
"=",
"1",
"for",
"i",
",",
"b",
"in",
"enumerate",
"(",
"chr_agp",
")",
":",
"b",
".",
"object_beg",
"=",
"object_beg",
"b",
".",
"part_number",
"=",
"i",
"+",
"1",
"if",
"opts",
".",
"nogaps",
"and",
"b",
".",
"is_gap",
":",
"continue",
"if",
"b",
".",
"is_gap",
":",
"b",
".",
"object_end",
"=",
"object_beg",
"+",
"b",
".",
"gap_length",
"-",
"1",
"else",
":",
"b",
".",
"object_end",
"=",
"object_beg",
"+",
"b",
".",
"component_span",
"-",
"1",
"object_beg",
"=",
"b",
".",
"object_end",
"+",
"1",
"print",
"(",
"str",
"(",
"b",
")",
",",
"file",
"=",
"fw",
")",
"fw",
".",
"close",
"(",
")",
"agp",
"=",
"AGP",
"(",
"newagpfile",
",",
"validate",
"=",
"True",
")",
"if",
"inplace",
":",
"shutil",
".",
"move",
"(",
"newagpfile",
",",
"agpfile",
")",
"logging",
".",
"debug",
"(",
"\"Rename file `{0}` to `{1}`\"",
".",
"format",
"(",
"newagpfile",
",",
"agpfile",
")",
")",
"newagpfile",
"=",
"agpfile",
"return",
"newagpfile"
] | python | %prog agpfile
Assume the component line order is correct and modify coordinates; this is
necessary mostly due to manual edits (insert/delete) that disrupt
the target coordinates. | false |
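The heart of reindex is the coordinate walk: each object's lines are renumbered and their start/end recomputed from cumulative spans. A standalone sketch of that walk on plain dicts (field names mirror the AGP lines above; the AGP class itself is not needed):

# Each dict stands in for one AGP line of a single object.
lines = [
    {"is_gap": False, "component_span": 100},
    {"is_gap": True, "gap_length": 50},
    {"is_gap": False, "component_span": 200},
]
object_beg = 1
for i, b in enumerate(lines):
    b["object_beg"] = object_beg
    b["part_number"] = i + 1
    span = b["gap_length"] if b["is_gap"] else b["component_span"]
    b["object_end"] = object_beg + span - 1
    object_beg = b["object_end"] + 1
print([(b["object_beg"], b["object_end"]) for b in lines])
# -> [(1, 100), (101, 150), (151, 350)]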
1,672,356 | def withdraw(self, **params):
"""Submit a withdraw request.
https://www.binance.com/restapipub.html
Assumptions:
- You must have Withdraw permissions enabled on your API key
- You must have withdrawn to the address specified through the website and approved the transaction via email
:param asset: required
:type asset: str
:param address: required
:type address: str
:param addressTag: optional - Secondary address identifier for coins like XRP, XMR, etc.
:type addressTag: str
:param amount: required
:type amount: decimal
:param name: optional - Description of the address, default asset value passed will be used
:type name: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "success",
"success": true,
"id":"7213fea8e94b4a5593d507237e5a555b"
}
:raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException
"""
# force a name for the withdrawal if one not set
if 'asset' in params and 'name' not in params:
params['name'] = params['asset']
res = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res | [
"def",
"withdraw",
"(",
"self",
",",
"**",
"params",
")",
":",
"if",
"'asset'",
"in",
"params",
"and",
"'name'",
"not",
"in",
"params",
":",
"params",
"[",
"'name'",
"]",
"=",
"params",
"[",
"'asset'",
"]",
"res",
"=",
"self",
".",
"_request_withdraw_api",
"(",
"'post'",
",",
"'withdraw.html'",
",",
"True",
",",
"data",
"=",
"params",
")",
"if",
"not",
"res",
"[",
"'success'",
"]",
":",
"raise",
"BinanceWithdrawException",
"(",
"res",
"[",
"'msg'",
"]",
")",
"return",
"res"
] | python | Submit a withdraw request.
https://www.binance.com/restapipub.html
Assumptions:
- You must have Withdraw permissions enabled on your API key
- You must have withdrawn to the address specified through the website and approved the transaction via email
:param asset: required
:type asset: str
:param address: required
:type address: str
:param addressTag: optional - Secondary address identifier for coins like XRP, XMR, etc.
:type addressTag: str
:param amount: required
:type amount: decimal
:param name: optional - Description of the address, default asset value passed will be used
:type name: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"msg": "success",
"success": true,
"id":"7213fea8e94b4a5593d507237e5a555b"
}
:raises: BinanceRequestException, BinanceAPIException, BinanceWithdrawException | false |
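A hedged usage sketch (the import paths and credentials are assumptions; only the keyword arguments shown in the docstring are taken from the source):

from binance.client import Client                        # assumed import path
from binance.exceptions import BinanceWithdrawException  # assumed import path

client = Client("api_key", "api_secret")   # placeholder credentials
try:
    res = client.withdraw(asset="ETH",
                          address="0x...",  # destination address placeholder
                          amount=0.1)       # name defaults to "ETH" here
except BinanceWithdrawException as exc:     # raised when success is false
    print(exc)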
1,854,439 | def geo_point_n(arg, n):
"""Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry.
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar
"""
op = ops.GeoPointN(arg, n)
return op.to_expr() | [
"def",
"geo_point_n",
"(",
"arg",
",",
"n",
")",
":",
"op",
"=",
"ops",
".",
"GeoPointN",
"(",
"arg",
",",
"n",
")",
"return",
"op",
".",
"to_expr",
"(",
")"
] | python | Return the Nth point in a single linestring in the geometry.
Negative values are counted backwards from the end of the LineString,
so that -1 is the last point. Returns NULL if there is no linestring in
the geometry.
Parameters
----------
arg : geometry
n : integer
Returns
-------
PointN : geometry scalar | false |
1,910,018 | def _verify_and_add_jwt():
"""
This helper method just checks and adds jwt data to the app context. Will
not add jwt data if it is already present. Only use in this module.
"""
if not app_context_has_jwt_data():
guard = current_guard()
token = guard.read_token_from_header()
jwt_data = guard.extract_jwt_token(token)
add_jwt_data_to_app_context(jwt_data) | [
"def",
"_verify_and_add_jwt",
"(",
")",
":",
"if",
"not",
"app_context_has_jwt_data",
"(",
")",
":",
"guard",
"=",
"current_guard",
"(",
")",
"token",
"=",
"guard",
".",
"read_token_from_header",
"(",
")",
"jwt_data",
"=",
"guard",
".",
"extract_jwt_token",
"(",
"token",
")",
"add_jwt_data_to_app_context",
"(",
"jwt_data",
")"
] | python | This helper method just checks and adds jwt data to the app context. Will
not add jwt data if it is already present. Only use in this module. | false |
1,661,636 | def delete_dns_server(self, service_name, deployment_name, dns_server_name):
'''
Deletes a DNS server from a deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Name of the DNS server that you want to delete.
'''
_validate_not_none('service_name', service_name)
_validate_not_none('deployment_name', deployment_name)
_validate_not_none('dns_server_name', dns_server_name)
return self._perform_delete(
self._get_dns_server_path(service_name,
deployment_name,
dns_server_name),
as_async=True) | [
"def",
"delete_dns_server",
"(",
"self",
",",
"service_name",
",",
"deployment_name",
",",
"dns_server_name",
")",
":",
"_validate_not_none",
"(",
"'service_name'",
",",
"service_name",
")",
"_validate_not_none",
"(",
"'deployment_name'",
",",
"deployment_name",
")",
"_validate_not_none",
"(",
"'dns_server_name'",
",",
"dns_server_name",
")",
"return",
"self",
".",
"_perform_delete",
"(",
"self",
".",
"_get_dns_server_path",
"(",
"service_name",
",",
"deployment_name",
",",
"dns_server_name",
")",
",",
"as_async",
"=",
"True",
")"
] | python | Deletes a DNS server from a deployment.
service_name:
The name of the service.
deployment_name:
The name of the deployment.
dns_server_name:
Name of the DNS server that you want to delete. | false |
2,537,542 | def kwe_by_textrank(text, top=10, pos=None, with_weight=False, key_phrase=False):
"""
:param text: input text to extract keywords from
:param top: number of keywords to return
:param pos: allowed POS tags; defaults to ['ns', 'n', 'vn', 'v', 'nr']
:param with_weight: if True, return (word, weight) tuples instead of bare words
:param key_phrase: if True, also return extracted key phrases
:return: the keywords, or (keywords, key_phrases) when key_phrase is True
"""
if pos is None:
pos = ['ns', 'n', 'vn', 'v', 'nr']
tr4k = TextRank4Keyword(allow_speech_tags=pos)
tr4k.analyze(text)
kw = tr4k.get_keywords(num=top, word_min_len=2)
if not with_weight:
kw = [x['word'] for x in kw]
else:
kw = [(x['word'], x['weight']) for x in kw]
if key_phrase:
kp = tr4k.get_keyphrases(keywords_num=top, min_occur_num=2)
return kw, kp
else:
return kw | [
"def",
"kwe_by_textrank",
"(",
"text",
",",
"top",
"=",
"10",
",",
"pos",
"=",
"None",
",",
"with_weight",
"=",
"False",
",",
"key_phrase",
"=",
"False",
")",
":",
"if",
"pos",
"is",
"None",
":",
"pos",
"=",
"[",
"'ns'",
",",
"'n'",
",",
"'vn'",
",",
"'v'",
",",
"'nr'",
"]",
"tr4k",
"=",
"TextRank4Keyword",
"(",
"allow_speech_tags",
"=",
"pos",
")",
"tr4k",
".",
"analyze",
"(",
"text",
")",
"kw",
"=",
"tr4k",
".",
"get_keywords",
"(",
"num",
"=",
"top",
",",
"word_min_len",
"=",
"2",
")",
"if",
"not",
"with_weight",
":",
"kw",
"=",
"[",
"x",
"[",
"'word'",
"]",
"for",
"x",
"in",
"kw",
"]",
"else",
":",
"kw",
"=",
"[",
"(",
"x",
"[",
"'word'",
"]",
",",
"x",
"[",
"'weight'",
"]",
")",
"for",
"x",
"in",
"kw",
"]",
"if",
"key_phrase",
":",
"kp",
"=",
"tr4k",
".",
"get_keyphrases",
"(",
"keywords_num",
"=",
"top",
",",
"min_occur_num",
"=",
"2",
")",
"return",
"kw",
",",
"kp",
"else",
":",
"return",
"kw"
] | python | :param text: input text to extract keywords from
:param top: number of keywords to return
:param pos: allowed POS tags; defaults to ['ns', 'n', 'vn', 'v', 'nr']
:param with_weight: if True, return (word, weight) tuples instead of bare words
:param key_phrase: if True, also return extracted key phrases
:return: the keywords, or (keywords, key_phrases) when key_phrase is True | false |
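Possible usage, assuming the function is importable from its module (the sample text is arbitrary Chinese prose):

text = "自然语言处理是人工智能领域的一个重要方向。"
kw = kwe_by_textrank(text, top=5)                       # bare keywords
kw_w = kwe_by_textrank(text, top=5, with_weight=True)   # (word, weight) pairs
kw, kp = kwe_by_textrank(text, top=5, key_phrase=True)  # keywords + key phrases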
1,815,054 | def url(self, service):
'''return URL for a tile'''
if service not in TILE_SERVICES:
raise TileException('unknown tile service %s' % service)
url = string.Template(TILE_SERVICES[service])
(x,y) = self.tile
tile_info = TileServiceInfo(x, y, self.zoom)
return url.substitute(tile_info) | [
"def",
"url",
"(",
"self",
",",
"service",
")",
":",
"if",
"service",
"not",
"in",
"TILE_SERVICES",
":",
"raise",
"TileException",
"(",
"'unknown tile service %s'",
"%",
"service",
")",
"url",
"=",
"string",
".",
"Template",
"(",
"TILE_SERVICES",
"[",
"service",
"]",
")",
"(",
"x",
",",
"y",
")",
"=",
"self",
".",
"tile",
"tile_info",
"=",
"TileServiceInfo",
"(",
"x",
",",
"y",
",",
"self",
".",
"zoom",
")",
"return",
"url",
".",
"substitute",
"(",
"tile_info",
")"
] | python | return URL for a tile | false |
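The URL construction is plain string.Template substitution against the fields of TileServiceInfo; a self-contained sketch of the same pattern (the template string below is illustrative, not one of the real TILE_SERVICES entries):

import string

template = string.Template("https://tiles.example.com/${zoom}/${x}/${y}.png")
print(template.substitute({"x": 3, "y": 5, "zoom": 7}))
# -> https://tiles.example.com/7/3/5.png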
2,425,978 | def request_finished_callback(sender, **kwargs):
"""This function logs if the user acceses the page"""
logger = logging.getLogger(__name__)
level = settings.AUTOMATED_LOGGING['loglevel']['request']
user = get_current_user()
uri, application, method, status = get_current_environ()
excludes = settings.AUTOMATED_LOGGING['exclude']['request']
if status and status in excludes:
return
if method and method.lower() in excludes:
return
if not settings.AUTOMATED_LOGGING['request']['query']:
uri = urllib.parse.urlparse(uri).path
logger.log(level, ('%s performed request at %s (%s %s)' %
(user, uri, method, status)).replace(" ", " "), extra={
'action': 'request',
'data': {
'user': user,
'uri': uri,
'method': method,
'application': application,
'status': status
}
}) | [
"def",
"request_finished_callback",
"(",
"sender",
",",
"**",
"kwargs",
")",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"level",
"=",
"settings",
".",
"AUTOMATED_LOGGING",
"[",
"'loglevel'",
"]",
"[",
"'request'",
"]",
"user",
"=",
"get_current_user",
"(",
")",
"uri",
",",
"application",
",",
"method",
",",
"status",
"=",
"get_current_environ",
"(",
")",
"excludes",
"=",
"settings",
".",
"AUTOMATED_LOGGING",
"[",
"'exclude'",
"]",
"[",
"'request'",
"]",
"if",
"status",
"and",
"status",
"in",
"excludes",
":",
"return",
"if",
"method",
"and",
"method",
".",
"lower",
"(",
")",
"in",
"excludes",
":",
"return",
"if",
"not",
"settings",
".",
"AUTOMATED_LOGGING",
"[",
"'request'",
"]",
"[",
"'query'",
"]",
":",
"uri",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"uri",
")",
".",
"path",
"logger",
".",
"log",
"(",
"level",
",",
"(",
"'%s performed request at %s (%s %s)'",
"%",
"(",
"user",
",",
"uri",
",",
"method",
",",
"status",
")",
")",
".",
"replace",
"(",
"\" \"",
",",
"\" \"",
")",
",",
"extra",
"=",
"{",
"'action'",
":",
"'request'",
",",
"'data'",
":",
"{",
"'user'",
":",
"user",
",",
"'uri'",
":",
"uri",
",",
"'method'",
":",
"method",
",",
"'application'",
":",
"application",
",",
"'status'",
":",
"status",
"}",
"}",
")"
] | python | This function logs if the user accesses the page | false |
2,615,064 | def get_user_application_data_directory():
"""
| Returns the user Application directory.
| The difference between :func:`get_user_application_data_directory`
and :func:`get_system_application_data_directory` definitions is that :func:`get_user_application_data_directory` definition
will append :attr:`foundations.globals.constants.Constants.provider_directory`
and :attr:`foundations.globals.constants.Constants.application_directory` attributes values to the path returned.
| If the user Application directory is not available, the function will fall back to the system temporary directory.
Example directories::
- 'C:\\Users\\$USER\\AppData\\Roaming\\Provider\\Application' on Windows 7.
- 'C:\\Documents and Settings\\$USER\\Application Data\\Provider\\Application' on Windows XP.
- '/Users/$USER/Library/Preferences/Provider/Application' on Mac Os X.
- '/home/$USER/.Provider/Application' on Linux.
:return: User Application directory.
:rtype: unicode
"""
system_application_data_directory = get_system_application_data_directory()
if not foundations.common.path_exists(system_application_data_directory):
LOGGER.error(
"!> Undefined or non existing system Application data directory, using 'HOME' directory as fallback!")
system_application_data_directory = Environment("HOME").get_value()
if not foundations.common.path_exists(system_application_data_directory):
temporary_directory = get_temporary_directory()
LOGGER.error("!> Undefined or non existing 'HOME' directory, using system temporary directory as fallback!")
system_application_data_directory = temporary_directory
return os.path.join(system_application_data_directory, Constants.provider_directory,
Constants.application_directory) | [
"def",
"get_user_application_data_directory",
"(",
")",
":",
"system_application_data_directory",
"=",
"get_system_application_data_directory",
"(",
")",
"if",
"not",
"foundations",
".",
"common",
".",
"path_exists",
"(",
"system_application_data_directory",
")",
":",
"LOGGER",
".",
"error",
"(",
"\"!> Undefined or non existing system Application data directory, using 'HOME' directory as fallback!\"",
")",
"system_application_data_directory",
"=",
"Environment",
"(",
"\"HOME\"",
")",
".",
"get_value",
"(",
")",
"if",
"not",
"foundations",
".",
"common",
".",
"path_exists",
"(",
"system_application_data_directory",
")",
":",
"temporary_directory",
"=",
"get_temporary_directory",
"(",
")",
"LOGGER",
".",
"error",
"(",
"\"!> Undefined or non existing 'HOME' directory, using system temporary directory as fallback!\"",
")",
"system_application_data_directory",
"=",
"temporary_directory",
"return",
"os",
".",
"path",
".",
"join",
"(",
"system_application_data_directory",
",",
"Constants",
".",
"provider_directory",
",",
"Constants",
".",
"application_directory",
")"
] | python | | Returns the user Application directory.
| The difference between :func:`get_user_application_data_directory`
and :func:`get_system_application_data_directory` definitions is that :func:`get_user_application_data_directory` definition
will append :attr:`foundations.globals.constants.Constants.provider_directory`
and :attr:`foundations.globals.constants.Constants.application_directory` attributes values to the path returned.
| If the user Application directory is not available, the function will fall back to the system temporary directory.
Example directories::
- 'C:\\Users\\$USER\\AppData\\Roaming\\Provider\\Application' on Windows 7.
- 'C:\\Documents and Settings\\$USER\\Application Data\\Provider\\Application' on Windows XP.
- '/Users/$USER/Library/Preferences/Provider/Application' on Mac Os X.
- '/home/$USER/.Provider/Application' on Linux.
:return: User Application directory.
:rtype: unicode | false |
1,853,783 | def rename(self, new_name, database=None):
"""
Rename table inside MapD. References to the old table are no longer
valid.
Parameters
----------
new_name : string
database : string
Returns
-------
renamed : MapDTable
"""
m = ddl.fully_qualified_re.match(new_name)
if not m and database is None:
database = self._database
statement = ddl.RenameTable(
self._qualified_name, new_name, new_database=database
)
self._client._execute(statement)
op = self.op().change_name(statement.new_qualified_name)
return type(self)(op) | [
"def",
"rename",
"(",
"self",
",",
"new_name",
",",
"database",
"=",
"None",
")",
":",
"m",
"=",
"ddl",
".",
"fully_qualified_re",
".",
"match",
"(",
"new_name",
")",
"if",
"not",
"m",
"and",
"database",
"is",
"None",
":",
"database",
"=",
"self",
".",
"_database",
"statement",
"=",
"ddl",
".",
"RenameTable",
"(",
"self",
".",
"_qualified_name",
",",
"new_name",
",",
"new_database",
"=",
"database",
")",
"self",
".",
"_client",
".",
"_execute",
"(",
"statement",
")",
"op",
"=",
"self",
".",
"op",
"(",
")",
".",
"change_name",
"(",
"statement",
".",
"new_qualified_name",
")",
"return",
"type",
"(",
"self",
")",
"(",
"op",
")"
] | python | Rename table inside MapD. References to the old table are no longer
valid.
Parameters
----------
new_name : string
database : string
Returns
-------
renamed : MapDTable | false |
1,600,342 | def to_feature(value):
"""Convert the given value to Feature if necessary."""
if isinstance(value, FeatureConnector):
return value
elif utils.is_dtype(value): # tf.int32, tf.string,...
return Tensor(shape=(), dtype=tf.as_dtype(value))
elif isinstance(value, dict):
return FeaturesDict(value)
else:
raise ValueError('Feature not supported: {}'.format(value)) | [
"def",
"to_feature",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"FeatureConnector",
")",
":",
"return",
"value",
"elif",
"utils",
".",
"is_dtype",
"(",
"value",
")",
":",
"return",
"Tensor",
"(",
"shape",
"=",
"(",
")",
",",
"dtype",
"=",
"tf",
".",
"as_dtype",
"(",
"value",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"return",
"FeaturesDict",
"(",
"value",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Feature not supported: {}'",
".",
"format",
"(",
"value",
")",
")"
] | python | Convert the given value to Feature if necessary. | false |
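Illustrative calls, assuming to_feature and its Tensor/FeaturesDict companions are importable from the tensorflow_datasets features module:

import tensorflow as tf

print(to_feature(tf.int32))          # a scalar Tensor feature with dtype tf.int32
print(to_feature({"a": tf.string}))  # a FeaturesDict wrapping a Tensor feature
# to_feature(42) raises ValueError: Feature not supported: 42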
2,386,809 | def replace_evict_func(self, func, only_read=False):
"""
>>> cache = Cache(log_level=logging.WARNING)
>>> def evict(dict, evict_number=10): pass
>>> cache.replace_evict_func(evict)
True
>>> def evict_b(dict): pass
>>> cache.replace_evict_func(evict_b)
False
>>> def evict_c(dict, a, b): pass
>>> cache.replace_evict_func(evict_c)
False
"""
self.logger.info('Replace the evict function %s ---> %s' % (
get_function_signature(self.evict_func), get_function_signature(func)))
self.evict_func = func
return True | [
"def",
"replace_evict_func",
"(",
"self",
",",
"func",
",",
"only_read",
"=",
"False",
")",
":",
"self",
".",
"logger",
".",
"info",
"(",
"'Replace the evict function %s ---> %s'",
"%",
"(",
"get_function_signature",
"(",
"self",
".",
"evict_func",
")",
",",
"get_function_signature",
"(",
"func",
")",
")",
")",
"self",
".",
"evict_func",
"=",
"func",
"return",
"True"
] | python | >>> cache = Cache(log_level=logging.WARNING)
>>> def evict(dict, evict_number=10): pass
>>> cache.replace_evict_func(evict)
True
>>> def evict_b(dict): pass
>>> cache.replace_evict_func(evict_b)
False
>>> def evict_c(dict, a, b): pass
>>> cache.replace_evict_func(evict_c)
False | false |
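Note that the body shown above returns True unconditionally, so the second and third doctests (which expect False) would fail as written; a signature check along these lines would honor them (a sketch, not the library's actual implementation):

import inspect

def _evict_signature_ok(func):
    # Expect the shape `def evict(dict, evict_number=10)`:
    # exactly two parameters, the second carrying a default value.
    params = list(inspect.signature(func).parameters.values())
    return (len(params) == 2
            and params[1].default is not inspect.Parameter.empty)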
2,505,049 | def scansum(self,seq,threshold = -1000):
"""
m.scansum(seq,threshold = -1000) -- Sum of scores over every window in the sequence. Returns
total, number of matches above threshold, average score, sum of exp(score)
"""
ll = self.ll
width = self.width
width_r = range(width)
width_rcr = range(width-1,-1,-1)
width_ranges = zip(width_r,width_rcr)
seqcomp = seq.translate(revcompTBL)
total = 0
hits = 0
etotal= 0
for offset in range(len(seq)-width+1):
total_f = 0
total_r = 0
for i,ir in width_ranges:
pos = offset+i
total_f = total_f + ll[i][ seq[pos]]
total_r = total_r + ll[i][seqcomp[pos]]
total_max = max(total_f,total_r)
if total_max >= threshold:
total = total + total_max
etotal = etotal + math.exp(total_max)
hits = hits + 1
if not hits:
ave = 0
else:
ave = float(total)/float(hits)
return(total,hits,ave,math.log(etotal)) | [
"def",
"scansum",
"(",
"self",
",",
"seq",
",",
"threshold",
"=",
"-",
"1000",
")",
":",
"ll",
"=",
"self",
".",
"ll",
"width",
"=",
"self",
".",
"width",
"width_r",
"=",
"range",
"(",
"width",
")",
"width_rcr",
"=",
"range",
"(",
"width",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
"width_ranges",
"=",
"list",
"(",
"zip",
"(",
"width_r",
",",
"width_rcr",
")",
")",
"seqcomp",
"=",
"seq",
".",
"translate",
"(",
"revcompTBL",
")",
"total",
"=",
"0",
"hits",
"=",
"0",
"etotal",
"=",
"0",
"for",
"offset",
"in",
"range",
"(",
"len",
"(",
"seq",
")",
"-",
"width",
"+",
"1",
")",
":",
"total_f",
"=",
"0",
"total_r",
"=",
"0",
"for",
"i",
",",
"ir",
"in",
"width_ranges",
":",
"pos",
"=",
"offset",
"+",
"i",
"total_f",
"=",
"total_f",
"+",
"ll",
"[",
"i",
"]",
"[",
"seq",
"[",
"pos",
"]",
"]",
"total_r",
"=",
"total_r",
"+",
"ll",
"[",
"i",
"]",
"[",
"seqcomp",
"[",
"pos",
"]",
"]",
"total_max",
"=",
"max",
"(",
"total_f",
",",
"total_r",
")",
"if",
"total_max",
">=",
"threshold",
":",
"total",
"=",
"total",
"+",
"total_max",
"etotal",
"=",
"etotal",
"+",
"math",
".",
"exp",
"(",
"total_max",
")",
"hits",
"=",
"hits",
"+",
"1",
"if",
"not",
"hits",
":",
"ave",
"=",
"0",
"else",
":",
"ave",
"=",
"float",
"(",
"total",
")",
"/",
"float",
"(",
"hits",
")",
"return",
"(",
"total",
",",
"hits",
",",
"ave",
",",
"math",
".",
"log",
"(",
"etotal",
")",
")"
] | python | m.scansum(seq,threshold = -1000) -- Sum of scores over every window in the sequence. Returns
total, number of matches above threshold, average score, sum of exp(score) | false |
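A self-contained sketch of the same sliding-window scan on a toy two-column log-likelihood matrix over a DNA string, ignoring the reverse strand for brevity (the ll/revcompTBL machinery above is motif-library specific):

import math

ll = [{"A": 1.0, "C": -1.0, "G": -1.0, "T": -1.0},   # column 0
      {"A": -1.0, "C": -1.0, "G": -1.0, "T": 1.0}]   # column 1
seq, width, threshold = "ATGAT", 2, 0.0

total = hits = etotal = 0.0
for offset in range(len(seq) - width + 1):
    s = sum(ll[i][seq[offset + i]] for i in range(width))
    if s >= threshold:
        total, hits, etotal = total + s, hits + 1, etotal + math.exp(s)
ave = total / hits if hits else 0
print(total, hits, ave, math.log(etotal))  # -> 4.0 2.0 2.0 ~2.693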
1,820,696 | def store(self, val, addr):
"""
Store a VexValue in memory at the specified loaction.
:param val: The VexValue of the value to store
:param addr: The VexValue of the address to store into
:return: None
"""
self.irsb_c.store(addr.rdt, val.rdt) | [
"def",
"store",
"(",
"self",
",",
"val",
",",
"addr",
")",
":",
"self",
".",
"irsb_c",
".",
"store",
"(",
"addr",
".",
"rdt",
",",
"val",
".",
"rdt",
")"
] | python | Store a VexValue in memory at the specified loaction.
:param val: The VexValue of the value to store
:param addr: The VexValue of the address to store into
:return: None | false |
2,180,665 | def add_hgnc_id(self, genes):
"""Add the correct hgnc id to a set of genes with hgnc symbols
Args:
genes(list(dict)): A set of genes with hgnc symbols only
"""
genes_by_alias = self.genes_by_alias()
for gene in genes:
id_info = genes_by_alias.get(gene['hgnc_symbol'])
if not id_info:
LOG.warning("Gene %s does not exist in scout", gene['hgnc_symbol'])
continue
gene['hgnc_id'] = id_info['true']
if not id_info['true']:
if len(id_info['ids']) > 1:
LOG.warning("Gene %s has ambiguous value, please choose one hgnc id in result", gene['hgnc_symbol'])
gene['hgnc_id'] = ','.join([str(hgnc_id) for hgnc_id in id_info['ids']]) | [
"def",
"add_hgnc_id",
"(",
"self",
",",
"genes",
")",
":",
"genes_by_alias",
"=",
"self",
".",
"genes_by_alias",
"(",
")",
"for",
"gene",
"in",
"genes",
":",
"id_info",
"=",
"genes_by_alias",
".",
"get",
"(",
"gene",
"[",
"'hgnc_symbol'",
"]",
")",
"if",
"not",
"id_info",
":",
"LOG",
".",
"warning",
"(",
"\"Gene %s does not exist in scout\"",
",",
"gene",
"[",
"'hgnc_symbol'",
"]",
")",
"continue",
"gene",
"[",
"'hgnc_id'",
"]",
"=",
"id_info",
"[",
"'true'",
"]",
"if",
"not",
"id_info",
"[",
"'true'",
"]",
":",
"if",
"len",
"(",
"id_info",
"[",
"'ids'",
"]",
")",
">",
"1",
":",
"LOG",
".",
"warning",
"(",
"\"Gene %s has ambiguous value, please choose one hgnc id in result\"",
",",
"gene",
"[",
"'hgnc_symbol'",
"]",
")",
"gene",
"[",
"'hgnc_id'",
"]",
"=",
"','",
".",
"join",
"(",
"[",
"str",
"(",
"hgnc_id",
")",
"for",
"hgnc_id",
"in",
"id_info",
"[",
"'ids'",
"]",
"]",
")"
] | python | Add the correct hgnc id to a set of genes with hgnc symbols
Args:
genes(list(dict)): A set of genes with hgnc symbols only | false |
2,268,481 | def minus(*args):
"""Also, converts either to ints or to floats."""
if len(args) == 1:
return -to_numeric(args[0])
return to_numeric(args[0]) - to_numeric(args[1]) | [
"def",
"minus",
"(",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"return",
"-",
"to_numeric",
"(",
"args",
"[",
"0",
"]",
")",
"return",
"to_numeric",
"(",
"args",
"[",
"0",
"]",
")",
"-",
"to_numeric",
"(",
"args",
"[",
"1",
"]",
")"
] | python | Unary or binary minus; also converts arguments to ints or floats. | false |
1,984,501 | def _compute_term_4(self, C, mag, R):
"""
(a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3)
"""
return (
(C['a16'] + C['a17'] * mag + C['a18'] * np.power(mag, 2) +
C['a19'] * np.power(mag, 3)) * np.power(R, 3)
) | [
"def",
"_compute_term_4",
"(",
"self",
",",
"C",
",",
"mag",
",",
"R",
")",
":",
"return",
"(",
"(",
"C",
"[",
"'a16'",
"]",
"+",
"C",
"[",
"'a17'",
"]",
"*",
"mag",
"+",
"C",
"[",
"'a18'",
"]",
"*",
"np",
".",
"power",
"(",
"mag",
",",
"2",
")",
"+",
"C",
"[",
"'a19'",
"]",
"*",
"np",
".",
"power",
"(",
"mag",
",",
"3",
")",
")",
"*",
"np",
".",
"power",
"(",
"R",
",",
"3",
")",
")"
] | python | (a16 + a17.*M + a18.*M.*M + a19.*M.*M.*M).*(d(r).^3) | false |
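Since the magnitude part is just a cubic polynomial in M, the term can be cross-checked with np.polyval (coefficient values below are illustrative, not from any coefficient table):

import numpy as np

C = {"a16": 0.1, "a17": 0.2, "a18": 0.3, "a19": 0.4}
mag, R = 6.0, np.array([10.0, 20.0])

term = (C["a16"] + C["a17"] * mag + C["a18"] * mag ** 2
        + C["a19"] * mag ** 3) * R ** 3
check = np.polyval([C["a19"], C["a18"], C["a17"], C["a16"]], mag) * R ** 3
assert np.allclose(term, check)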
1,885,261 | def HandleVerack(self):
"""Handle the `verack` response."""
m = Message('verack')
self.SendSerializedMessage(m)
self.leader.NodeCount += 1
self.identifier = self.leader.NodeCount
logger.debug(f"{self.prefix} Handshake complete!")
self.handshake_complete = True
self.ProtocolReady() | [
"def",
"HandleVerack",
"(",
"self",
")",
":",
"m",
"=",
"Message",
"(",
"'verack'",
")",
"self",
".",
"SendSerializedMessage",
"(",
"m",
")",
"self",
".",
"leader",
".",
"NodeCount",
"+=",
"1",
"self",
".",
"identifier",
"=",
"self",
".",
"leader",
".",
"NodeCount",
"logger",
".",
"debug",
"(",
"f\"{self.prefix} Handshake complete!\"",
")",
"self",
".",
"handshake_complete",
"=",
"True",
"self",
".",
"ProtocolReady",
"(",
")"
] | python | Handle the `verack` response. | false |
2,186,714 | def services_list(self, io_handler, specification=None):
"""
Lists the services in the framework, optionally filtered on an exact
specification.
"""
# Head of the table
headers = ("ID", "Specifications", "Bundle", "Ranking")
# Lines
references = (
self._context.get_all_service_references(specification, None) or []
)
# Construct the list of services
lines = [
[
str(entry)
for entry in (
ref.get_property(constants.SERVICE_ID),
ref.get_property(constants.OBJECTCLASS),
ref.get_bundle(),
ref.get_property(constants.SERVICE_RANKING),
)
]
for ref in references
]
if not lines and specification:
# No matching service found
io_handler.write_line("No service provides '{0}'", specification)
return False
# Print'em all
io_handler.write(self._utils.make_table(headers, lines))
io_handler.write_line("{0} services registered", len(lines))
return None | [
"def",
"services_list",
"(",
"self",
",",
"io_handler",
",",
"specification",
"=",
"None",
")",
":",
"headers",
"=",
"(",
"\"ID\"",
",",
"\"Specifications\"",
",",
"\"Bundle\"",
",",
"\"Ranking\"",
")",
"references",
"=",
"(",
"self",
".",
"_context",
".",
"get_all_service_references",
"(",
"specification",
",",
"None",
")",
"or",
"[",
"]",
")",
"lines",
"=",
"[",
"[",
"str",
"(",
"entry",
")",
"for",
"entry",
"in",
"(",
"ref",
".",
"get_property",
"(",
"constants",
".",
"SERVICE_ID",
")",
",",
"ref",
".",
"get_property",
"(",
"constants",
".",
"OBJECTCLASS",
")",
",",
"ref",
".",
"get_bundle",
"(",
")",
",",
"ref",
".",
"get_property",
"(",
"constants",
".",
"SERVICE_RANKING",
")",
",",
")",
"]",
"for",
"ref",
"in",
"references",
"]",
"if",
"not",
"lines",
"and",
"specification",
":",
"io_handler",
".",
"write_line",
"(",
"\"No service provides '{0}'\"",
",",
"specification",
")",
"return",
"False",
"io_handler",
".",
"write",
"(",
"self",
".",
"_utils",
".",
"make_table",
"(",
"headers",
",",
"lines",
")",
")",
"io_handler",
".",
"write_line",
"(",
"\"{0} services registered\"",
",",
"len",
"(",
"lines",
")",
")",
"return",
"None"
] | python | Lists the services in the framework, optionally filtered on an exact
specification. | false |
2,387,157 | def tracking_m2m(
sender, instance, action, reverse, model, pk_set, using, **kwargs
):
"""
m2m_changed callback.
The idea is to get the model and the instance of the object being tracked,
and the different objects being added/removed. It is then send to the
``_create_tracked_field_m2m`` method to extract the proper attribute for
the TrackedFieldModification.
"""
action_event = {
'pre_clear': 'CLEAR',
'pre_add': 'ADD',
'pre_remove': 'REMOVE',
}
if (action not in action_event.keys()):
return
if reverse:
if action == 'pre_clear':
# It will actually be a remove of ``instance`` on every
# tracked object being related
action = 'pre_remove'
# pk_set is None for clear events, we need to get objects' pk.
field = _get_m2m_field(model, sender)
field = model._meta.get_field(field).remote_field.get_accessor_name()
pk_set = set([obj.id for obj in getattr(instance, field).all()])
# Create an event for each object being tracked
for pk in pk_set:
tracked_instance = model.objects.get(pk=pk)
objects = [instance]
_create_tracked_event_m2m(
model, tracked_instance, sender, objects, action_event[action]
)
else:
# Get the model of the object being tracked
tracked_model = instance._meta.model
objects = []
if pk_set is not None:
objects = [model.objects.get(pk=pk) for pk in pk_set]
_create_tracked_event_m2m(
tracked_model, instance, sender, objects, action_event[action]
) | [
"def",
"tracking_m2m",
"(",
"sender",
",",
"instance",
",",
"action",
",",
"reverse",
",",
"model",
",",
"pk_set",
",",
"using",
",",
"**",
"kwargs",
")",
":",
"action_event",
"=",
"{",
"'pre_clear'",
":",
"'CLEAR'",
",",
"'pre_add'",
":",
"'ADD'",
",",
"'pre_remove'",
":",
"'REMOVE'",
",",
"}",
"if",
"(",
"action",
"not",
"in",
"action_event",
".",
"keys",
"(",
")",
")",
":",
"return",
"if",
"reverse",
":",
"if",
"action",
"==",
"'pre_clear'",
":",
"action",
"=",
"'pre_remove'",
"field",
"=",
"_get_m2m_field",
"(",
"model",
",",
"sender",
")",
"field",
"=",
"model",
".",
"_meta",
".",
"get_field",
"(",
"field",
")",
".",
"remote_field",
".",
"get_accessor_name",
"(",
")",
"pk_set",
"=",
"set",
"(",
"[",
"obj",
".",
"id",
"for",
"obj",
"in",
"getattr",
"(",
"instance",
",",
"field",
")",
".",
"all",
"(",
")",
"]",
")",
"for",
"pk",
"in",
"pk_set",
":",
"tracked_instance",
"=",
"model",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"pk",
")",
"objects",
"=",
"[",
"instance",
"]",
"_create_tracked_event_m2m",
"(",
"model",
",",
"tracked_instance",
",",
"sender",
",",
"objects",
",",
"action_event",
"[",
"action",
"]",
")",
"else",
":",
"tracked_model",
"=",
"instance",
".",
"_meta",
".",
"model",
"objects",
"=",
"[",
"]",
"if",
"pk_set",
"is",
"not",
"None",
":",
"objects",
"=",
"[",
"model",
".",
"objects",
".",
"get",
"(",
"pk",
"=",
"pk",
")",
"for",
"pk",
"in",
"pk_set",
"]",
"_create_tracked_event_m2m",
"(",
"tracked_model",
",",
"instance",
",",
"sender",
",",
"objects",
",",
"action_event",
"[",
"action",
"]",
")"
] | python | m2m_changed callback.
The idea is to get the model and the instance of the object being tracked,
and the different objects being added/removed. It is then sent to the
``_create_tracked_field_m2m`` method to extract the proper attribute for
the TrackedFieldModification. | false |
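The callback is meant to be wired to Django's m2m_changed signal through an m2m field's intermediate (through) model; a hedged sketch with a hypothetical Article.tags field:

from django.db.models.signals import m2m_changed

# Fire tracking_m2m whenever Article.tags membership changes.
m2m_changed.connect(tracking_m2m, sender=Article.tags.through)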
1,620,929 | def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, PolicyV1beta1FSGroupStrategyOptions):
return False
return self.__dict__ == other.__dict__ | [
"def",
"__eq__",
"(",
"self",
",",
"other",
")",
":",
"if",
"not",
"isinstance",
"(",
"other",
",",
"PolicyV1beta1FSGroupStrategyOptions",
")",
":",
"return",
"False",
"return",
"self",
".",
"__dict__",
"==",
"other",
".",
"__dict__"
] | python | Returns true if both objects are equal | false |
2,015,382 | def __repr__(self):
""" Return a representation of internal state.
"""
if self._rpc:
# Connected state
return "%s connected to %s [%s, up %s] via %r" % (
self.__class__.__name__, self.engine_id, self.engine_software,
fmt.human_duration(self.uptime, 0, 2, True).strip(), config.scgi_url,
)
else:
# Unconnected state
self.load_config()
return "%s connectable via %r" % (
self.__class__.__name__, config.scgi_url,
) | [
"def",
"__repr__",
"(",
"self",
")",
":",
"if",
"self",
".",
"_rpc",
":",
"return",
"\"%s connected to %s [%s, up %s] via %r\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"engine_id",
",",
"self",
".",
"engine_software",
",",
"fmt",
".",
"human_duration",
"(",
"self",
".",
"uptime",
",",
"0",
",",
"2",
",",
"True",
")",
".",
"strip",
"(",
")",
",",
"config",
".",
"scgi_url",
",",
")",
"else",
":",
"self",
".",
"load_config",
"(",
")",
"return",
"\"%s connectable via %r\"",
"%",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"config",
".",
"scgi_url",
",",
")"
] | python | Return a representation of internal state. | false |
2,693,545 | def set(self, key, value):
"""
Updates the value of the given key in the loaded content.
Args:
key (str): Key of the property to update.
value (str): New value of the property.
Return:
bool: Indicates whether or not a change was made.
"""
match = self._get_match(key=key)
if not match:
self._log.info('"%s" does not exist, so it will be added.', key)
if isinstance(value, str):
self._log.info('"%s" will be added as a PHP string value.',
key)
value_str = '\'{}\''.format(value)
else:
self._log.info('"%s" will be added as a PHP object value.',
key)
value_str = str(value).lower()
new = 'define(\'{key}\', {value});'.format(
key=key,
value=value_str)
self._log.info('"%s" will be added as: %s', key, new)
replace_this = '<?php\n'
replace_with = '<?php\n' + new + '\n'
self._content = self._content.replace(replace_this, replace_with)
self._log.info('Content string has been updated.')
return True
if self._get_value_from_match(key=key, match=match) == value:
self._log.info('"%s" is already up-to-date.', key)
return False
self._log.info('"%s" exists and will be updated.', key)
start_index = match.start(1)
end_index = match.end(1)
if isinstance(value, bool):
value = str(value).lower()
self._log.info('"%s" will be updated with boolean value: %s',
key,
value)
else:
self._log.info('"%s" will be updated with string value: %s',
key,
value)
start = self._content[:start_index]
end = self._content[end_index:]
self._content = start + value + end
return True | [
"def",
"set",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"match",
"=",
"self",
".",
"_get_match",
"(",
"key",
"=",
"key",
")",
"if",
"not",
"match",
":",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" does not exist, so it will be added.'",
",",
"key",
")",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" will be added as a PHP string value.'",
",",
"key",
")",
"value_str",
"=",
"'\\'{}\\''",
".",
"format",
"(",
"value",
")",
"else",
":",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" will be added as a PHP object value.'",
",",
"key",
")",
"value_str",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"new",
"=",
"'define(\\'{key}\\', {value});'",
".",
"format",
"(",
"key",
"=",
"key",
",",
"value",
"=",
"value_str",
")",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" will be added as: %s'",
",",
"key",
",",
"new",
")",
"replace_this",
"=",
"'<?php\\n'",
"replace_with",
"=",
"'<?php\\n'",
"+",
"new",
"+",
"'\\n'",
"self",
".",
"_content",
"=",
"self",
".",
"_content",
".",
"replace",
"(",
"replace_this",
",",
"replace_with",
")",
"self",
".",
"_log",
".",
"info",
"(",
"'Content string has been updated.'",
")",
"return",
"True",
"if",
"self",
".",
"_get_value_from_match",
"(",
"key",
"=",
"key",
",",
"match",
"=",
"match",
")",
"==",
"value",
":",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" is already up-to-date.'",
",",
"key",
")",
"return",
"False",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" exists and will be updated.'",
",",
"key",
")",
"start_index",
"=",
"match",
".",
"start",
"(",
"1",
")",
"end_index",
"=",
"match",
".",
"end",
"(",
"1",
")",
"if",
"isinstance",
"(",
"value",
",",
"bool",
")",
":",
"value",
"=",
"str",
"(",
"value",
")",
".",
"lower",
"(",
")",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" will be updated with boolean value: %s'",
",",
"key",
",",
"value",
")",
"else",
":",
"self",
".",
"_log",
".",
"info",
"(",
"'\"%s\" will be updated with string value: %s'",
",",
"key",
",",
"value",
")",
"start",
"=",
"self",
".",
"_content",
"[",
":",
"start_index",
"]",
"end",
"=",
"self",
".",
"_content",
"[",
"end_index",
":",
"]",
"self",
".",
"_content",
"=",
"start",
"+",
"value",
"+",
"end",
"return",
"True"
] | python | Updates the value of the given key in the loaded content.
Args:
key (str): Key of the property to update.
value (str): New value of the property.
Return:
bool: Indicates whether or not a change was made. | false |
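Possible usage, assuming the class holding set wraps a wp-config.php-style string in self._content (the wrapper name here is hypothetical):

editor = WpConfigEditor("<?php\ndefine('WP_DEBUG', false);\n")  # hypothetical class
editor.set("WP_DEBUG", True)   # rewrites the existing define() value to true
editor.set("DB_NAME", "blog")  # no match, so define('DB_NAME', 'blog'); is added after <?php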
2,450,206 | def parse(md, model, encoding='utf-8', config=None):
"""
Translate the Versa Markdown syntax into Versa model relationships
md -- markdown source text
model -- Versa model to take the output relationship
encoding -- character encoding (defaults to UTF-8)
Returns: The overall base URI (`@base`) specified in the Markdown file, or None
>>> from versa.driver import memory
>>> from versa.reader.md import from_markdown
>>> m = memory.connection()
>>> from_markdown(open('test/resource/poetry.md').read(), m)
'http://uche.ogbuji.net/poems/'
>>> m.size()
40
>>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
(I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {})
"""
#Set up configuration to interpret the conventions for the Markdown
config = config or {}
#This mapping takes syntactical elements such as the various header levels in Markdown and associates a resource type with the specified resources
syntaxtypemap = {}
if config.get('autotype-h1'): syntaxtypemap['h1'] = config.get('autotype-h1')
if config.get('autotype-h2'): syntaxtypemap['h2'] = config.get('autotype-h2')
if config.get('autotype-h3'): syntaxtypemap['h3'] = config.get('autotype-h3')
interp_stanza = config.get('interpretations', {})
interpretations = {}
def setup_interpretations(interp):
#Map the interpretation IRIs to functions to do the data prep
for prop, interp_key in interp.items():
if interp_key.startswith('@'):
interp_key = iri.absolutize(interp_key[1:], VERSA_BASEIRI)
if interp_key in PREP_METHODS:
interpretations[prop] = PREP_METHODS[interp_key]
else:
#just use the identity, i.e. no-op
interpretations[prop] = lambda x, **kwargs: x
setup_interpretations(interp_stanza)
#Prep ID generator, in case needed
idg = idgen(None)
#Parse the Markdown
#Alternately:
#from xml.sax.saxutils import escape, unescape
#h = markdown.markdown(escape(md.decode(encoding)), output_format='html5')
#Note: even using safe_mode this should not be presumed safe from tainted input
#h = markdown.markdown(md.decode(encoding), safe_mode='escape', output_format='html5')
comments = mkdcomments.CommentsExtension()
h = markdown.markdown(md, safe_mode='escape', output_format='html5', extensions=[comments])
#doc = html.markup_fragment(inputsource.text(h.encode('utf-8')))
tb = treebuilder()
h = '<html>' + h + '</html>'
root = tb.parse(h)
#Each section contains one resource description, but the special one named @docheader contains info to help interpret the rest
first_h1 = next(select_name(descendants(root), 'h1'))
#top_section_fields = itertools.takewhile(lambda x: x.xml_name != 'h1', select_name(following_siblings(first_h1), 'h2'))
#Extract header elements. Notice I use an empty element with an empty parent as the default result
docheader = next(select_value(select_name(descendants(root), 'h1'), '@docheader'), element('empty', parent=root)) # //h1[.="@docheader"]
sections = filter(lambda x: x.xml_value != '@docheader', select_name_pattern(descendants(root), HEADER_PAT)) # //h1[not(.="@docheader")]|h2[not(.="@docheader")]|h3[not(.="@docheader")]
def fields(sect):
'''
Each section represents a resource and contains a list with its properties.
This generator parses the list and yields the key-value pairs representing the properties.
Some properties have attributes, expressed in markdown as a nested list. If present, these attributes
are yielded as well; else None is yielded.
'''
#import logging; logging.debug(repr(sect))
#Pull all the list elements until the next header. This accommodates multiple lists in a section
sect_body_items = itertools.takewhile(lambda x: HEADER_PAT.match(x.xml_name) is None, select_elements(following_siblings(sect)))
#results_until(sect.xml_select('following-sibling::*'), 'self::h1|self::h2|self::h3')
#field_list = [ U(li) for ul in sect.xml_select('following-sibling::ul') for li in ul.xml_select('./li') ]
field_list = [ li for elem in select_name(sect_body_items, 'ul') for li in select_name(elem, 'li') ]
def parse_li(pair):
'''
Parse each list item into a property pair
'''
if pair.strip():
matched = REL_PAT.match(pair)
if not matched:
raise ValueError(_('Syntax error in relationship expression: {0}'.format(pair)))
#print matched.groups()
if matched.group(3): prop = matched.group(3).strip()
if matched.group(4): prop = matched.group(4).strip()
if matched.group(7):
val = matched.group(7).strip()
typeindic = RES_VAL
elif matched.group(9):
val = matched.group(9).strip()
typeindic = TEXT_VAL
elif matched.group(11):
val = matched.group(11).strip()
typeindic = TEXT_VAL
elif matched.group(12):
val = matched.group(12).strip()
typeindic = UNKNOWN_VAL
else:
val = ''
typeindic = UNKNOWN_VAL
#prop, val = [ part.strip() for part in U(li.xml_select('string(.)')).split(':', 1) ]
#import logging; logging.debug(repr((prop, val)))
return prop, val, typeindic
return None, None, None
#Go through each list item
for li in field_list:
#Is there a nested list, which expresses attributes on a property
if list(select_name(li, 'ul')):
#main = ''.join([ node.xml_value
# for node in itertools.takewhile(
# lambda x: x.xml_name != 'ul', select_elements(li)
# )
# ])
main = ''.join(itertools.takewhile(
lambda x: isinstance(x, text), li.xml_children
))
#main = li.xml_select('string(ul/preceding-sibling::node())')
prop, val, typeindic = parse_li(main)
subfield_list = [ parse_li(sli.xml_value) for e in select_name(li, 'ul') for sli in (
select_name(e, 'li')
) ]
subfield_list = [ (p, v, t) for (p, v, t) in subfield_list if p is not None ]
#Support a special case for syntax such as in the @iri and @interpretations: stanza of @docheader
if val is None: val = ''
yield prop, val, typeindic, subfield_list
#Just a regular, unadorned property
else:
prop, val, typeindic = parse_li(li.xml_value)
if prop: yield prop, val, typeindic, None
iris = {}
#Gather the document-level metadata from the @docheader section
base = propbase = rtbase = document_iri = default_lang = None
for prop, val, typeindic, subfield_list in fields(docheader):
#The @iri section is where key IRI prefixes can be set
if prop == '@iri':
for (k, uri, typeindic) in subfield_list:
if k == '@base':
base = propbase = rtbase = uri
elif k == '@property':
propbase = uri
elif k == '@resource-type':
rtbase = uri
else:
iris[k] = uri
#The @interpretations section is where defaults can be set as to the primitive types of values from the Markdown, based on the relevant property/relationship
elif prop == '@interpretations':
#Iterate over items from the @docheader/@interpretations section to set up for further parsing
interp = {}
for k, v, x in subfield_list:
interp[I(iri.absolutize(k, propbase))] = v
setup_interpretations(interp)
#Setting an IRI for this very document being parsed
elif prop == '@document':
document_iri = val
elif prop == '@language':
default_lang = val
#If we have a resource to which to attach them, just attach all other properties
elif document_iri or base:
rid = document_iri or base
fullprop = I(iri.absolutize(prop, propbase or base))
if fullprop in interpretations:
val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
if val is not None: model.add(rid, fullprop, val)
else:
model.add(rid, fullprop, val)
#Default IRI prefixes if @iri/@base is set
if not propbase: propbase = base
if not rtbase: rtbase = base
if not document_iri: document_iri = base
#Go through the resources expressed in remaining sections
for sect in sections:
#if U(sect) == '@docheader': continue #Not needed because excluded by ss
#The header can take one of 4 forms: "ResourceID" "ResourceID [ResourceType]" "[ResourceType]" or "[]"
#The 3rd form is for an anonymous resource with specified type and the 4th an anonymous resource with unspecified type
matched = RESOURCE_PAT.match(sect.xml_value)
if not matched:
raise ValueError(_('Syntax error in resource header: {0}'.format(sect.xml_value)))
rid = matched.group(1)
rtype = matched.group(3)
if rtype:
rtype = I(iri.absolutize(rtype, base))
if rid:
rid = I(iri.absolutize(rid, base))
if not rid:
rid = next(idg)
#Resource type might be set by syntax config
if not rtype:
rtype = syntaxtypemap.get(sect.xml_name)
if rtype:
model.add(rid, TYPE_REL, rtype)
#Add the property
for prop, val, typeindic, subfield_list in fields(sect):
attrs = {}
for (aprop, aval, atype) in subfield_list or ():
if atype == RES_VAL:
valmatch = URI_ABBR_PAT.match(aval)
if valmatch:
uri = iris[valmatch.group(1)]
attrs[aprop] = URI_ABBR_PAT.sub(uri + '\\2\\3', aval)
else:
attrs[aprop] = I(iri.absolutize(aval, rtbase))
elif atype == TEXT_VAL:
attrs[aprop] = aval
elif atype == UNKNOWN_VAL:
attrs[aprop] = aval
if aprop in interpretations:
aval = interpretations[aprop](aval, rid=rid, fullprop=aprop, base=base, model=model)
if aval is not None: attrs[aprop] = aval
else:
attrs[aprop] = aval
propmatch = URI_ABBR_PAT.match(prop)
if propmatch:
uri = iris[propmatch.group(1)]
fullprop = URI_ABBR_PAT.sub(uri + '\\2\\3', prop)
else:
fullprop = I(iri.absolutize(prop, propbase))
if typeindic == RES_VAL:
valmatch = URI_ABBR_PAT.match(val)  # match the main value rather than the last attribute value, which may be undefined here
if valmatch:
uri = iris[valmatch.group(1)]
val = URI_ABBR_PAT.sub(uri + '\\2\\3', val)
else:
val = I(iri.absolutize(val, rtbase))
model.add(rid, fullprop, val, attrs)
elif typeindic == TEXT_VAL:
if '@lang' not in attrs: attrs['@lang'] = default_lang
model.add(rid, fullprop, val, attrs)
elif typeindic == UNKNOWN_VAL:
if fullprop in interpretations:
val = interpretations[fullprop](val, rid=rid, fullprop=fullprop, base=base, model=model)
if val is not None: model.add(rid, fullprop, val)
else:
model.add(rid, fullprop, val, attrs)
#resinfo = AB_RESOURCE_PAT.match(val)
#if resinfo:
# val = resinfo.group(1)
# valtype = resinfo.group(3)
# if not val: val = model.generate_resource()
# if valtype: attrs[TYPE_REL] = valtype
return document_iri | [
"def",
"parse",
"(",
"md",
",",
"model",
",",
"encoding",
"=",
"'utf-8'",
",",
"config",
"=",
"None",
")",
":",
"config",
"=",
"config",
"or",
"{",
"}",
"syntaxtypemap",
"=",
"{",
"}",
"if",
"config",
".",
"get",
"(",
"'autotype-h1'",
")",
":",
"syntaxtypemap",
"[",
"'h1'",
"]",
"=",
"config",
".",
"get",
"(",
"'autotype-h1'",
")",
"if",
"config",
".",
"get",
"(",
"'autotype-h2'",
")",
":",
"syntaxtypemap",
"[",
"'h2'",
"]",
"=",
"config",
".",
"get",
"(",
"'autotype-h2'",
")",
"if",
"config",
".",
"get",
"(",
"'autotype-h3'",
")",
":",
"syntaxtypemap",
"[",
"'h3'",
"]",
"=",
"config",
".",
"get",
"(",
"'autotype-h3'",
")",
"interp_stanza",
"=",
"config",
".",
"get",
"(",
"'interpretations'",
",",
"{",
"}",
")",
"interpretations",
"=",
"{",
"}",
"def",
"setup_interpretations",
"(",
"interp",
")",
":",
"for",
"prop",
",",
"interp_key",
"in",
"interp",
".",
"items",
"(",
")",
":",
"if",
"interp_key",
".",
"startswith",
"(",
"'@'",
")",
":",
"interp_key",
"=",
"iri",
".",
"absolutize",
"(",
"interp_key",
"[",
"1",
":",
"]",
",",
"VERSA_BASEIRI",
")",
"if",
"interp_key",
"in",
"PREP_METHODS",
":",
"interpretations",
"[",
"prop",
"]",
"=",
"PREP_METHODS",
"[",
"interp_key",
"]",
"else",
":",
"interpretations",
"[",
"prop",
"]",
"=",
"lambda",
"x",
",",
"**",
"kwargs",
":",
"x",
"setup_interpretations",
"(",
"interp_stanza",
")",
"idg",
"=",
"idgen",
"(",
"None",
")",
"comments",
"=",
"mkdcomments",
".",
"CommentsExtension",
"(",
")",
"h",
"=",
"markdown",
".",
"markdown",
"(",
"md",
",",
"safe_mode",
"=",
"'escape'",
",",
"output_format",
"=",
"'html5'",
",",
"extensions",
"=",
"[",
"comments",
"]",
")",
"tb",
"=",
"treebuilder",
"(",
")",
"h",
"=",
"'<html>'",
"+",
"h",
"+",
"'</html>'",
"root",
"=",
"tb",
".",
"parse",
"(",
"h",
")",
"first_h1",
"=",
"next",
"(",
"select_name",
"(",
"descendants",
"(",
"root",
")",
",",
"'h1'",
")",
")",
"docheader",
"=",
"next",
"(",
"select_value",
"(",
"select_name",
"(",
"descendants",
"(",
"root",
")",
",",
"'h1'",
")",
",",
"'@docheader'",
")",
",",
"element",
"(",
"'empty'",
",",
"parent",
"=",
"root",
")",
")",
"sections",
"=",
"filter",
"(",
"lambda",
"x",
":",
"x",
".",
"xml_value",
"!=",
"'@docheader'",
",",
"select_name_pattern",
"(",
"descendants",
"(",
"root",
")",
",",
"HEADER_PAT",
")",
")",
"def",
"fields",
"(",
"sect",
")",
":",
"sect_body_items",
"=",
"itertools",
".",
"takewhile",
"(",
"lambda",
"x",
":",
"HEADER_PAT",
".",
"match",
"(",
"x",
".",
"xml_name",
")",
"is",
"None",
",",
"select_elements",
"(",
"following_siblings",
"(",
"sect",
")",
")",
")",
"field_list",
"=",
"[",
"li",
"for",
"elem",
"in",
"select_name",
"(",
"sect_body_items",
",",
"'ul'",
")",
"for",
"li",
"in",
"select_name",
"(",
"elem",
",",
"'li'",
")",
"]",
"def",
"parse_li",
"(",
"pair",
")",
":",
"if",
"pair",
".",
"strip",
"(",
")",
":",
"matched",
"=",
"REL_PAT",
".",
"match",
"(",
"pair",
")",
"if",
"not",
"matched",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"'Syntax error in relationship expression: {0}'",
".",
"format",
"(",
"pair",
")",
")",
")",
"if",
"matched",
".",
"group",
"(",
"3",
")",
":",
"prop",
"=",
"matched",
".",
"group",
"(",
"3",
")",
".",
"strip",
"(",
")",
"if",
"matched",
".",
"group",
"(",
"4",
")",
":",
"prop",
"=",
"matched",
".",
"group",
"(",
"4",
")",
".",
"strip",
"(",
")",
"if",
"matched",
".",
"group",
"(",
"7",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"7",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"RES_VAL",
"elif",
"matched",
".",
"group",
"(",
"9",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"9",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"TEXT_VAL",
"elif",
"matched",
".",
"group",
"(",
"11",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"11",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"TEXT_VAL",
"elif",
"matched",
".",
"group",
"(",
"12",
")",
":",
"val",
"=",
"matched",
".",
"group",
"(",
"12",
")",
".",
"strip",
"(",
")",
"typeindic",
"=",
"UNKNOWN_VAL",
"else",
":",
"val",
"=",
"''",
"typeindic",
"=",
"UNKNOWN_VAL",
"return",
"prop",
",",
"val",
",",
"typeindic",
"return",
"None",
",",
"None",
",",
"None",
"for",
"li",
"in",
"field_list",
":",
"if",
"list",
"(",
"select_name",
"(",
"li",
",",
"'ul'",
")",
")",
":",
"main",
"=",
"''",
".",
"join",
"(",
"itertools",
".",
"takewhile",
"(",
"lambda",
"x",
":",
"isinstance",
"(",
"x",
",",
"text",
")",
",",
"li",
".",
"xml_children",
")",
")",
"prop",
",",
"val",
",",
"typeindic",
"=",
"parse_li",
"(",
"main",
")",
"subfield_list",
"=",
"[",
"parse_li",
"(",
"sli",
".",
"xml_value",
")",
"for",
"e",
"in",
"select_name",
"(",
"li",
",",
"'ul'",
")",
"for",
"sli",
"in",
"(",
"select_name",
"(",
"e",
",",
"'li'",
")",
")",
"]",
"subfield_list",
"=",
"[",
"(",
"p",
",",
"v",
",",
"t",
")",
"for",
"(",
"p",
",",
"v",
",",
"t",
")",
"in",
"subfield_list",
"if",
"p",
"is",
"not",
"None",
"]",
"if",
"val",
"is",
"None",
":",
"val",
"=",
"''",
"yield",
"prop",
",",
"val",
",",
"typeindic",
",",
"subfield_list",
"else",
":",
"prop",
",",
"val",
",",
"typeindic",
"=",
"parse_li",
"(",
"li",
".",
"xml_value",
")",
"if",
"prop",
":",
"yield",
"prop",
",",
"val",
",",
"typeindic",
",",
"None",
"iris",
"=",
"{",
"}",
"base",
"=",
"propbase",
"=",
"rtbase",
"=",
"document_iri",
"=",
"default_lang",
"=",
"None",
"for",
"prop",
",",
"val",
",",
"typeindic",
",",
"subfield_list",
"in",
"fields",
"(",
"docheader",
")",
":",
"if",
"prop",
"==",
"'@iri'",
":",
"for",
"(",
"k",
",",
"uri",
",",
"typeindic",
")",
"in",
"subfield_list",
":",
"if",
"k",
"==",
"'@base'",
":",
"base",
"=",
"propbase",
"=",
"rtbase",
"=",
"uri",
"elif",
"k",
"==",
"'@property'",
":",
"propbase",
"=",
"uri",
"elif",
"k",
"==",
"'@resource-type'",
":",
"rtbase",
"=",
"uri",
"else",
":",
"iris",
"[",
"k",
"]",
"=",
"uri",
"elif",
"prop",
"==",
"'@interpretations'",
":",
"interp",
"=",
"{",
"}",
"for",
"k",
",",
"v",
",",
"x",
"in",
"subfield_list",
":",
"interp",
"[",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"k",
",",
"propbase",
")",
")",
"]",
"=",
"v",
"setup_interpretations",
"(",
"interp",
")",
"elif",
"prop",
"==",
"'@document'",
":",
"document_iri",
"=",
"val",
"elif",
"prop",
"==",
"'@language'",
":",
"default_lang",
"=",
"val",
"elif",
"document_iri",
"or",
"base",
":",
"rid",
"=",
"document_iri",
"or",
"base",
"fullprop",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"prop",
",",
"propbase",
"or",
"base",
")",
")",
"if",
"fullprop",
"in",
"interpretations",
":",
"val",
"=",
"interpretations",
"[",
"fullprop",
"]",
"(",
"val",
",",
"rid",
"=",
"rid",
",",
"fullprop",
"=",
"fullprop",
",",
"base",
"=",
"base",
",",
"model",
"=",
"model",
")",
"if",
"val",
"is",
"not",
"None",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
")",
"else",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
")",
"if",
"not",
"propbase",
":",
"propbase",
"=",
"base",
"if",
"not",
"rtbase",
":",
"rtbase",
"=",
"base",
"if",
"not",
"document_iri",
":",
"document_iri",
"=",
"base",
"for",
"sect",
"in",
"sections",
":",
"matched",
"=",
"RESOURCE_PAT",
".",
"match",
"(",
"sect",
".",
"xml_value",
")",
"if",
"not",
"matched",
":",
"raise",
"ValueError",
"(",
"_",
"(",
"'Syntax error in resource header: {0}'",
".",
"format",
"(",
"sect",
".",
"xml_value",
")",
")",
")",
"rid",
"=",
"matched",
".",
"group",
"(",
"1",
")",
"rtype",
"=",
"matched",
".",
"group",
"(",
"3",
")",
"if",
"rtype",
":",
"rtype",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"rtype",
",",
"base",
")",
")",
"if",
"rid",
":",
"rid",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"rid",
",",
"base",
")",
")",
"if",
"not",
"rid",
":",
"rid",
"=",
"next",
"(",
"idg",
")",
"if",
"not",
"rtype",
":",
"rtype",
"=",
"syntaxtypemap",
".",
"get",
"(",
"sect",
".",
"xml_name",
")",
"if",
"rtype",
":",
"model",
".",
"add",
"(",
"rid",
",",
"TYPE_REL",
",",
"rtype",
")",
"for",
"prop",
",",
"val",
",",
"typeindic",
",",
"subfield_list",
"in",
"fields",
"(",
"sect",
")",
":",
"attrs",
"=",
"{",
"}",
"for",
"(",
"aprop",
",",
"aval",
",",
"atype",
")",
"in",
"subfield_list",
"or",
"(",
")",
":",
"if",
"atype",
"==",
"RES_VAL",
":",
"valmatch",
"=",
"URI_ABBR_PAT",
".",
"match",
"(",
"aval",
")",
"if",
"valmatch",
":",
"uri",
"=",
"iris",
"[",
"valmatch",
".",
"group",
"(",
"1",
")",
"]",
"attrs",
"[",
"aprop",
"]",
"=",
"URI_ABBR_PAT",
".",
"sub",
"(",
"uri",
"+",
"'\\\\2\\\\3'",
",",
"aval",
")",
"else",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"aval",
",",
"rtbase",
")",
")",
"elif",
"atype",
"==",
"TEXT_VAL",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"elif",
"atype",
"==",
"UNKNOWN_VAL",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"if",
"aprop",
"in",
"interpretations",
":",
"aval",
"=",
"interpretations",
"[",
"aprop",
"]",
"(",
"aval",
",",
"rid",
"=",
"rid",
",",
"fullprop",
"=",
"aprop",
",",
"base",
"=",
"base",
",",
"model",
"=",
"model",
")",
"if",
"aval",
"is",
"not",
"None",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"else",
":",
"attrs",
"[",
"aprop",
"]",
"=",
"aval",
"propmatch",
"=",
"URI_ABBR_PAT",
".",
"match",
"(",
"prop",
")",
"if",
"propmatch",
":",
"uri",
"=",
"iris",
"[",
"propmatch",
".",
"group",
"(",
"1",
")",
"]",
"fullprop",
"=",
"URI_ABBR_PAT",
".",
"sub",
"(",
"uri",
"+",
"'\\\\2\\\\3'",
",",
"prop",
")",
"else",
":",
"fullprop",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"prop",
",",
"propbase",
")",
")",
"if",
"typeindic",
"==",
"RES_VAL",
":",
"valmatch",
"=",
"URI_ABBR_PAT",
".",
"match",
"(",
"aval",
")",
"if",
"valmatch",
":",
"uri",
"=",
"iris",
"[",
"valmatch",
".",
"group",
"(",
"1",
")",
"]",
"val",
"=",
"URI_ABBR_PAT",
".",
"sub",
"(",
"uri",
"+",
"'\\\\2\\\\3'",
",",
"val",
")",
"else",
":",
"val",
"=",
"I",
"(",
"iri",
".",
"absolutize",
"(",
"val",
",",
"rtbase",
")",
")",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
",",
"attrs",
")",
"elif",
"typeindic",
"==",
"TEXT_VAL",
":",
"if",
"'@lang'",
"not",
"in",
"attrs",
":",
"attrs",
"[",
"'@lang'",
"]",
"=",
"default_lang",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
",",
"attrs",
")",
"elif",
"typeindic",
"==",
"UNKNOWN_VAL",
":",
"if",
"fullprop",
"in",
"interpretations",
":",
"val",
"=",
"interpretations",
"[",
"fullprop",
"]",
"(",
"val",
",",
"rid",
"=",
"rid",
",",
"fullprop",
"=",
"fullprop",
",",
"base",
"=",
"base",
",",
"model",
"=",
"model",
")",
"if",
"val",
"is",
"not",
"None",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
")",
"else",
":",
"model",
".",
"add",
"(",
"rid",
",",
"fullprop",
",",
"val",
",",
"attrs",
")",
"return",
"document_iri"
] | python | Translate the Versa Markdown syntax into Versa model relationships
md -- markdown source text
model -- Versa model to take the output relationship
encoding -- character encoding (defaults to UTF-8)
Returns: The overall base URI (`@base`) specified in the Markdown file, or None
>>> from versa.driver import memory
>>> from versa.reader.md import from_markdown
>>> m = memory.connection()
>>> from_markdown(open('test/resource/poetry.md').read(), m)
'http://uche.ogbuji.net/poems/'
>>> m.size()
40
>>> next(m.match(None, 'http://uche.ogbuji.net/poems/updated', '2013-10-15'))
(I(http://uche.ogbuji.net/poems/1), I(http://uche.ogbuji.net/poems/updated), '2013-10-15', {}) | false |
1,928,500 | def _scrape_document(self):
'''Extract links from the DOM.'''
mock_response = self._new_mock_response(
self._response, self._get_temp_path('phantom', '.html')
)
self._item_session.request = self._request
self._item_session.response = mock_response
self._processing_rule.scrape_document(self._item_session)
if mock_response.body:
mock_response.body.close() | [
"def",
"_scrape_document",
"(",
"self",
")",
":",
"mock_response",
"=",
"self",
".",
"_new_mock_response",
"(",
"self",
".",
"_response",
",",
"self",
".",
"_get_temp_path",
"(",
"'phantom'",
",",
"'.html'",
")",
")",
"self",
".",
"_item_session",
".",
"request",
"=",
"self",
".",
"_request",
"self",
".",
"_item_session",
".",
"response",
"=",
"mock_response",
"self",
".",
"_processing_rule",
".",
"scrape_document",
"(",
"item_session",
")",
"if",
"mock_response",
".",
"body",
":",
"mock_response",
".",
"body",
".",
"close",
"(",
")"
] | python | Extract links from the DOM. | false |
2,577,271 | def runningstd(t, data, width):
"""Compute the running standard deviation of a time series.
Returns `t_new`, `std_r`.
"""
ne = len(t) - width
t_new = np.zeros(ne)
std_r = np.zeros(ne)
for i in range(ne):
t_new[i] = np.mean(t[i:i+width+1])
std_r[i] = scipy.stats.nanstd(data[i:i+width+1])  # removed in SciPy 1.0; np.nanstd is the modern equivalent
return t_new, std_r | [
"def",
"runningstd",
"(",
"t",
",",
"data",
",",
"width",
")",
":",
"ne",
"=",
"len",
"(",
"t",
")",
"-",
"width",
"t_new",
"=",
"np",
".",
"zeros",
"(",
"ne",
")",
"std_r",
"=",
"np",
".",
"zeros",
"(",
"ne",
")",
"for",
"i",
"in",
"range",
"(",
"ne",
")",
":",
"t_new",
"[",
"i",
"]",
"=",
"np",
".",
"mean",
"(",
"t",
"[",
"i",
":",
"i",
"+",
"width",
"+",
"1",
"]",
")",
"std_r",
"[",
"i",
"]",
"=",
"scipy",
".",
"stats",
".",
"nanstd",
"(",
"data",
"[",
"i",
":",
"i",
"+",
"width",
"+",
"1",
"]",
")",
"return",
"t_new",
",",
"std_r"
] | python | Compute the running standard deviation of a time series.
Returns `t_new`, `std_r`. | false |
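The running-window statistic above is easy to sanity-check. The sketch below assumes `np.nanstd` in place of `scipy.stats.nanstd` (removed from SciPy since 1.0) and invents a noisy sine as input.

```python
import numpy as np

# Synthetic input: a sine with noise; t and data are invented for the demo.
t = np.linspace(0.0, 10.0, 200)
data = np.sin(t) + np.random.default_rng(0).normal(0.0, 0.1, t.size)

width = 20
ne = len(t) - width
t_new = np.empty(ne)
std_r = np.empty(ne)
for i in range(ne):
    window = slice(i, i + width + 1)    # width + 1 samples, as in the record
    t_new[i] = np.mean(t[window])
    std_r[i] = np.nanstd(data[window])  # stand-in for scipy.stats.nanstd

print(t_new.shape)  # (180,)
```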
1,700,399 | def DualDBSystemCronJob(legacy_name=None, stateful=False):
"""Decorator that creates AFF4 and RELDB cronjobs from a given mixin."""
def Decorator(cls):
"""Decorator producing 2 classes: legacy style one and a new style one."""
if not legacy_name:
raise ValueError("legacy_name has to be provided")
# Legacy cron jobs have different base classes depending on whether they're
# stateful or not.
if stateful:
aff4_base_cls = StatefulSystemCronFlow
else:
aff4_base_cls = SystemCronFlow
# Make sure that we're dealing with a true mixin to avoid subtle errors.
if issubclass(cls, cronjobs.SystemCronJobBase):
raise ValueError("Mixin class shouldn't inherit from SystemCronJobBase")
if issubclass(cls, aff4_base_cls):
raise ValueError("Mixin class shouldn't inherit from %s" %
aff4_base_cls.__name__)
# Generate legacy class. Register it within the module as it's not going
# to be returned from the decorator.
aff4_cls = compatibility.MakeType(
legacy_name, (cls, LegacyCronJobAdapterMixin, aff4_base_cls), {})
module = sys.modules[cls.__module__]
setattr(module, legacy_name, aff4_cls)
# Generate new class. No need to register it in the module (like the legacy
# one) since it will replace the original decorated class.
reldb_cls = compatibility.MakeType(
compatibility.GetName(cls), (cls, cronjobs.SystemCronJobBase), {})
return reldb_cls
return Decorator | [
"def",
"DualDBSystemCronJob",
"(",
"legacy_name",
"=",
"None",
",",
"stateful",
"=",
"False",
")",
":",
"def",
"Decorator",
"(",
"cls",
")",
":",
"if",
"not",
"legacy_name",
":",
"raise",
"ValueError",
"(",
"\"legacy_name has to be provided\"",
")",
"if",
"stateful",
":",
"aff4_base_cls",
"=",
"StatefulSystemCronFlow",
"else",
":",
"aff4_base_cls",
"=",
"SystemCronFlow",
"if",
"issubclass",
"(",
"cls",
",",
"cronjobs",
".",
"SystemCronJobBase",
")",
":",
"raise",
"ValueError",
"(",
"\"Mixin class shouldn't inherit from SystemCronJobBase\"",
")",
"if",
"issubclass",
"(",
"cls",
",",
"aff4_base_cls",
")",
":",
"raise",
"ValueError",
"(",
"\"Mixin class shouldn't inherit from %s\"",
"%",
"aff4_base_cls",
".",
"__name__",
")",
"aff4_cls",
"=",
"compatibility",
".",
"MakeType",
"(",
"legacy_name",
",",
"(",
"cls",
",",
"LegacyCronJobAdapterMixin",
",",
"aff4_base_cls",
")",
",",
"{",
"}",
")",
"module",
"=",
"sys",
".",
"modules",
"[",
"cls",
".",
"__module__",
"]",
"setattr",
"(",
"module",
",",
"legacy_name",
",",
"aff4_cls",
")",
"reldb_cls",
"=",
"compatibility",
".",
"MakeType",
"(",
"compatibility",
".",
"GetName",
"(",
"cls",
")",
",",
"(",
"cls",
",",
"cronjobs",
".",
"SystemCronJobBase",
")",
",",
"{",
"}",
")",
"return",
"reldb_cls",
"return",
"Decorator"
] | python | Decorator that creates AFF4 and RELDB cronjobs from a given mixin. | false |
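The decorator's core trick is building classes at runtime and registering the legacy one in the defining module. `compatibility.MakeType` is GRR-specific; a sketch with plain `type()` and `setattr` on `sys.modules`, using invented base classes, shows the same pattern.

```python
import sys

class ReportMixin:
    def run(self):
        return 'report from ' + type(self).__name__

class LegacyBase:
    flavor = 'legacy'

class ModernBase:
    flavor = 'modern'

# Build the legacy class dynamically and register it in this module,
# mirroring compatibility.MakeType + setattr(module, legacy_name, ...).
LegacyReport = type('LegacyReport', (ReportMixin, LegacyBase), {})
setattr(sys.modules[__name__], 'LegacyReport', LegacyReport)

# The "new style" class keeps the mixin's own name and replaces it.
Report = type('Report', (ReportMixin, ModernBase), {})

print(LegacyReport().run(), LegacyReport.flavor)  # report from LegacyReport legacy
print(Report().run(), Report.flavor)              # report from Report modern
```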
2,518,030 | def update_value(self, id_number, new_value, metadata=None):
"""
Update a canned value
:type id_number: int
:param id_number: canned value ID number
:type new_value: str
:param new_value: New canned value value
:type metadata: str
:param metadata: Optional metadata
:rtype: dict
:return: an empty dictionary
"""
data = {
'id': id_number,
'new_value': new_value
}
if metadata is not None:
data['metadata'] = metadata
return self.post('updateValue', data) | [
"def",
"update_value",
"(",
"self",
",",
"id_number",
",",
"new_value",
",",
"metadata",
"=",
"None",
")",
":",
"data",
"=",
"{",
"'id'",
":",
"id_number",
",",
"'new_value'",
":",
"new_value",
"}",
"if",
"metadata",
"is",
"not",
"None",
":",
"data",
"[",
"'metadata'",
"]",
"=",
"metadata",
"return",
"self",
".",
"post",
"(",
"'updateValue'",
",",
"data",
")"
] | python | Update a canned value
:type id_number: int
:param id_number: canned value ID number
:type new_value: str
:param new_value: New canned value value
:type metadata: str
:param metadata: Optional metadata
:rtype: dict
:return: an empty dictionary | false |
2,576,172 | def load_data_table(table_name, meta_file, meta):
"""Return the contents and metadata of a given table.
Args:
table_name(str): Name of the table.
meta_file(str): Path to the meta.json file.
meta(dict): Contents of meta.json.
Returns:
tuple(pandas.DataFrame, dict)
"""
for table in meta['tables']:
if table['name'] == table_name:
prefix = os.path.dirname(meta_file)
relative_path = os.path.join(prefix, meta['path'], table['path'])
return pd.read_csv(relative_path), table | [
"def",
"load_data_table",
"(",
"table_name",
",",
"meta_file",
",",
"meta",
")",
":",
"for",
"table",
"in",
"meta",
"[",
"'tables'",
"]",
":",
"if",
"table",
"[",
"'name'",
"]",
"==",
"table_name",
":",
"prefix",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"meta_file",
")",
"relative_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"prefix",
",",
"meta",
"[",
"'path'",
"]",
",",
"table",
"[",
"'path'",
"]",
")",
"return",
"pd",
".",
"read_csv",
"(",
"relative_path",
")",
",",
"table"
] | python | Return the contents and metadata of a given table.
Args:
table_name(str): Name of the table.
meta_file(str): Path to the meta.json file.
meta(dict): Contents of meta.json.
Returns:
tuple(pandas.DataFrame, dict) | false |
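A quick exercise of the lookup, assuming the record's `load_data_table` is in scope; the `data/users.csv` layout and the meta contents are invented for the demo. Note the function returns `None` implicitly when no table name matches.

```python
import json, os, tempfile
import pandas as pd

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'data'))
pd.DataFrame({'id': [1, 2], 'name': ['a', 'b']}).to_csv(
    os.path.join(root, 'data', 'users.csv'), index=False)

meta = {'path': 'data', 'tables': [{'name': 'users', 'path': 'users.csv'}]}
meta_file = os.path.join(root, 'meta.json')
with open(meta_file, 'w') as fh:
    json.dump(meta, fh)

df, table = load_data_table('users', meta_file, meta)
print(df.shape, table['path'])  # (2, 2) users.csv
```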
2,324,600 | def _cdist_scipy(x, y, exponent=1):
"""Pairwise distance between the points in two sets."""
metric = 'euclidean'
if exponent != 1:
metric = 'sqeuclidean'
distances = _spatial.distance.cdist(x, y, metric=metric)
if exponent != 1:
distances **= exponent / 2
return distances | [
"def",
"_cdist_scipy",
"(",
"x",
",",
"y",
",",
"exponent",
"=",
"1",
")",
":",
"metric",
"=",
"'euclidean'",
"if",
"exponent",
"!=",
"1",
":",
"metric",
"=",
"'sqeuclidean'",
"distances",
"=",
"_spatial",
".",
"distance",
".",
"cdist",
"(",
"x",
",",
"y",
",",
"metric",
"=",
"metric",
")",
"if",
"exponent",
"!=",
"1",
":",
"distances",
"**=",
"exponent",
"/",
"2",
"return",
"distances"
] | python | Pairwise distance between the points in two sets. | false |
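The exponent handling is worth verifying: `'sqeuclidean'` yields squared distances, so raising to `exponent / 2` produces |x − y| ** exponent. A self-contained check:

```python
import numpy as np
from scipy import spatial as _spatial

x = np.array([[0.0, 0.0], [1.0, 0.0]])
y = np.array([[0.0, 3.0], [4.0, 0.0]])
exponent = 1.5

# Squared distances raised to exponent/2, as in the record ...
d = _spatial.distance.cdist(x, y, metric='sqeuclidean') ** (exponent / 2)

# ... agree with |x - y| ** exponent computed directly.
direct = np.linalg.norm(x[:, None, :] - y[None, :, :], axis=-1) ** exponent
print(np.allclose(d, direct))  # True
```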
2,011,813 | def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2)) | [
"def",
"rotated",
"(",
"self",
",",
"rotation_center",
",",
"angle",
")",
":",
"rot",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"np",
".",
"cos",
"(",
"angle",
")",
",",
"np",
".",
"sin",
"(",
"angle",
")",
"]",
",",
"[",
"-",
"np",
".",
"sin",
"(",
"angle",
")",
",",
"np",
".",
"cos",
"(",
"angle",
")",
"]",
"]",
")",
"t",
"=",
"np",
".",
"asfarray",
"(",
"rotation_center",
")",
"new_c",
"=",
"np",
".",
"dot",
"(",
"rot",
".",
"T",
",",
"(",
"self",
".",
"center",
"-",
"t",
")",
")",
"+",
"t",
"return",
"RotatedBox",
"(",
"new_c",
",",
"self",
".",
"width",
",",
"self",
".",
"height",
",",
"(",
"self",
".",
"angle",
"+",
"angle",
")",
"%",
"(",
"np",
".",
"pi",
"*",
"2",
")",
")"
] | python | Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1) | false |
2,288,104 | def pbm(self):
""" Plumbum lazy property """
if not self.disable_rpyc:
from plumbum import SshMachine
return SshMachine(host=self.private_hostname, user=self.username,
keyfile=self.key_filename,
ssh_opts=["-o", "UserKnownHostsFile=/dev/null",
"-o", "StrictHostKeyChecking=no"])
else:
return None | [
"def",
"pbm",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"disable_rpyc",
":",
"from",
"plumbum",
"import",
"SshMachine",
"return",
"SshMachine",
"(",
"host",
"=",
"self",
".",
"private_hostname",
",",
"user",
"=",
"self",
".",
"username",
",",
"keyfile",
"=",
"self",
".",
"key_filename",
",",
"ssh_opts",
"=",
"[",
"\"-o\"",
",",
"\"UserKnownHostsFile=/dev/null\"",
",",
"\"-o\"",
",",
"\"StrictHostKeyChecking=no\"",
"]",
")",
"else",
":",
"return",
"None"
] | python | Plumbum lazy property | false |
2,113,928 | def generate_contentinfo_from_folder(self, csvwriter, rel_path, filenames):
"""
Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder.
"""
LOGGER.debug('IN process_folder ' + str(rel_path) + ' ' + str(filenames))
from ricecooker.utils.linecook import filter_filenames, filter_thumbnail_files, chan_path_from_rel_path
# WRITE TOPIC ROW
topicrow = self.channeldir_node_to_row( rel_path.split(os.path.sep) )
csvwriter.writerow(topicrow)
# WRITE CONTENT NODE ROWS
chan_path = chan_path_from_rel_path(rel_path, self.channeldir)
filenames_cleaned = filter_filenames(filenames)
# filenames_cleaned2 = filter_thumbnail_files(chan_path, filenames_cleaned, self)
for filename in filenames_cleaned:
path_tuple = rel_path.split(os.path.sep)
path_tuple.append(filename)
filerow = self.channeldir_node_to_row(path_tuple)
csvwriter.writerow(filerow) | [
"def",
"generate_contentinfo_from_folder",
"(",
"self",
",",
"csvwriter",
",",
"rel_path",
",",
"filenames",
")",
":",
"LOGGER",
".",
"debug",
"(",
"'IN process_folder '",
"+",
"str",
"(",
"rel_path",
")",
"+",
"' '",
"+",
"str",
"(",
"filenames",
")",
")",
"from",
"ricecooker",
".",
"utils",
".",
"linecook",
"import",
"filter_filenames",
",",
"filter_thumbnail_files",
",",
"chan_path_from_rel_path",
"topicrow",
"=",
"self",
".",
"channeldir_node_to_row",
"(",
"rel_path",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
")",
"csvwriter",
".",
"writerow",
"(",
"topicrow",
")",
"chan_path",
"=",
"chan_path_from_rel_path",
"(",
"rel_path",
",",
"self",
".",
"channeldir",
")",
"filenames_cleaned",
"=",
"filter_filenames",
"(",
"filenames",
")",
"for",
"filename",
"in",
"filenames_cleaned",
":",
"path_tuple",
"=",
"rel_path",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"path_tuple",
".",
"append",
"(",
"filename",
")",
"filerow",
"=",
"self",
".",
"channeldir_node_to_row",
"(",
"path_tuple",
")",
"csvwriter",
".",
"writerow",
"(",
"filerow",
")"
] | python | Create a topic node row in Content.csv for the folder at `rel_path` and
add content node rows for all the files in the `rel_path` folder. | false |
2,044,253 | def aggregate(self, start, end):
'''
This method encapsulates the metric aggregation logic.
Override this method when you inherit this class.
By default, it takes the last value.
'''
last = self.objects(
level='daily', date__lte=self.iso(end),
date__gte=self.iso(start)).order_by('-date').first()
return last.values[self.name] | [
"def",
"aggregate",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"last",
"=",
"self",
".",
"objects",
"(",
"level",
"=",
"'daily'",
",",
"date__lte",
"=",
"self",
".",
"iso",
"(",
"end",
")",
",",
"date__gte",
"=",
"self",
".",
"iso",
"(",
"start",
")",
")",
".",
"order_by",
"(",
"'-date'",
")",
".",
"first",
"(",
")",
"return",
"last",
".",
"values",
"[",
"self",
".",
"name",
"]"
] | python | This method encapsulates the metric aggregation logic.
Override this method when you inherit this class.
By default, it takes the last value. | false |
1,861,413 | def _read_composites(self, compositor_nodes):
"""Read (generate) composites."""
keepables = set()
for item in compositor_nodes:
self._generate_composite(item, keepables)
return keepables | [
"def",
"_read_composites",
"(",
"self",
",",
"compositor_nodes",
")",
":",
"keepables",
"=",
"set",
"(",
")",
"for",
"item",
"in",
"compositor_nodes",
":",
"self",
".",
"_generate_composite",
"(",
"item",
",",
"keepables",
")",
"return",
"keepables"
] | python | Read (generate) composites. | false |
1,990,773 | def cli(env, identifier):
"""Cancel global IP."""
mgr = SoftLayer.NetworkManager(env.client)
global_ip_id = helpers.resolve_id(mgr.resolve_global_ip_ids, identifier,
name='global ip')
if not (env.skip_confirmations or formatting.no_going_back(global_ip_id)):
raise exceptions.CLIAbort('Aborted')
mgr.cancel_global_ip(global_ip_id) | [
"def",
"cli",
"(",
"env",
",",
"identifier",
")",
":",
"mgr",
"=",
"SoftLayer",
".",
"NetworkManager",
"(",
"env",
".",
"client",
")",
"global_ip_id",
"=",
"helpers",
".",
"resolve_id",
"(",
"mgr",
".",
"resolve_global_ip_ids",
",",
"identifier",
",",
"name",
"=",
"'global ip'",
")",
"if",
"not",
"(",
"env",
".",
"skip_confirmations",
"or",
"formatting",
".",
"no_going_back",
"(",
"global_ip_id",
")",
")",
":",
"raise",
"exceptions",
".",
"CLIAbort",
"(",
"'Aborted'",
")",
"mgr",
".",
"cancel_global_ip",
"(",
"global_ip_id",
")"
] | python | Cancel global IP. | false |
1,605,309 | def is_floating(dtype):
"""Returns whether this is a (non-quantized, real) floating point type."""
dtype = tf.as_dtype(dtype)
if hasattr(dtype, 'is_floating'):
return dtype.is_floating
return np.issubdtype(np.dtype(dtype), np.float) | [
"def",
"is_floating",
"(",
"dtype",
")",
":",
"dtype",
"=",
"tf",
".",
"as_dtype",
"(",
"dtype",
")",
"if",
"hasattr",
"(",
"dtype",
",",
"'is_floating'",
")",
":",
"return",
"dtype",
".",
"is_floating",
"return",
"np",
".",
"issubdtype",
"(",
"np",
".",
"dtype",
"(",
"dtype",
")",
",",
"np",
".",
"float",
")"
] | python | Returns whether this is a (non-quantized, real) floating point type. | false |
2,328,608 | def recursive_index_encode(int_array, max=32767, min=-32768):
"""Pack an integer array using recursive indexing.
:param int_array: the input array of integers
:param max: the maximum integer size
:param min: the minimum integer size
:return the array of integers after recursive index encoding"""
out_arr = []
for curr in int_array:
if curr >= 0 :
while curr >= max:
out_arr.append(max)
curr -= max
else:
while curr <= min:
out_arr.append(min)
curr += int(math.fabs(min))
out_arr.append(curr)
return out_arr | [
"def",
"recursive_index_encode",
"(",
"int_array",
",",
"max",
"=",
"32767",
",",
"min",
"=",
"-",
"32768",
")",
":",
"out_arr",
"=",
"[",
"]",
"for",
"curr",
"in",
"int_array",
":",
"if",
"curr",
">=",
"0",
":",
"while",
"curr",
">=",
"max",
":",
"out_arr",
".",
"append",
"(",
"max",
")",
"curr",
"-=",
"max",
"else",
":",
"while",
"curr",
"<=",
"min",
":",
"out_arr",
".",
"append",
"(",
"min",
")",
"curr",
"+=",
"int",
"(",
"math",
".",
"fabs",
"(",
"min",
")",
")",
"out_arr",
".",
"append",
"(",
"curr",
")",
"return",
"out_arr"
] | python | Pack an integer array using recursive indexing.
:param int_array: the input array of integers
:param max: the maximum integer size
:param min: the minimum integer size
:return the array of integers after recursive index encoding | false |
2,020,415 | def _lmowfv1(password):
"""
[MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as LMOWFv1 in document to create a one way hash of the password. Only
used in NTLMv1 auth without session security
:param password: The password of the user we are trying to authenticate with
:return res: A Lan Manager hash of the password supplied
"""
# fix the password length to 14 bytes
password = password.upper()
lm_pw = password[0:14]
# do hash
magic_str = b"KGS!@#$%" # page 56 in [MS-NLMP v28.0]
res = b''
dobj = des.DES(lm_pw[0:7])
res = res + dobj.encrypt(magic_str)
dobj = des.DES(lm_pw[7:14])
res = res + dobj.encrypt(magic_str)
return res | [
"def",
"_lmowfv1",
"(",
"password",
")",
":",
"password",
"=",
"password",
".",
"upper",
"(",
")",
"lm_pw",
"=",
"password",
"[",
"0",
":",
"14",
"]",
"magic_str",
"=",
"b\"KGS!@#$%\"",
"res",
"=",
"b''",
"dobj",
"=",
"des",
".",
"DES",
"(",
"lm_pw",
"[",
"0",
":",
"7",
"]",
")",
"res",
"=",
"res",
"+",
"dobj",
".",
"encrypt",
"(",
"magic_str",
")",
"dobj",
"=",
"des",
".",
"DES",
"(",
"lm_pw",
"[",
"7",
":",
"14",
"]",
")",
"res",
"=",
"res",
"+",
"dobj",
".",
"encrypt",
"(",
"magic_str",
")",
"return",
"res"
] | python | [MS-NLMP] v28.0 2016-07-14
3.3.1 NTLM v1 Authentication
Same function as LMOWFv1 in document to create a one way hash of the password. Only
used in NTLMv1 auth without session security
:param password: The password of the user we are trying to authenticate with
:return res: A Lan Manager hash of the password supplied | false |
2,234,814 | def box_add(self, name, url, provider=None, force=False):
'''
Adds a box with given name, from given url.
force: If True, overwrite an existing box if it exists.
'''
force_opt = '--force' if force else None
cmd = ['box', 'add', name, url, force_opt]
if provider is not None:
cmd += ['--provider', provider]
self._call_vagrant_command(cmd) | [
"def",
"box_add",
"(",
"self",
",",
"name",
",",
"url",
",",
"provider",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"force_opt",
"=",
"'--force'",
"if",
"force",
"else",
"None",
"cmd",
"=",
"[",
"'box'",
",",
"'add'",
",",
"name",
",",
"url",
",",
"force_opt",
"]",
"if",
"provider",
"is",
"not",
"None",
":",
"cmd",
"+=",
"[",
"'--provider'",
",",
"provider",
"]",
"self",
".",
"_call_vagrant_command",
"(",
"cmd",
")"
] | python | Adds a box with given name, from given url.
force: If True, overwrite an existing box if it exists. | false |
1,810,313 | def canPrepare(self, ppReq) -> (bool, str):
"""
Return whether the batch of requests in the PRE-PREPARE can
proceed to the PREPARE step.
:param ppReq: any object with identifier and requestId attributes
"""
if self.has_sent_prepare(ppReq):
return False, 'has already sent PREPARE for {}'.format(ppReq)
return True, '' | [
"def",
"canPrepare",
"(",
"self",
",",
"ppReq",
")",
"->",
"(",
"bool",
",",
"str",
")",
":",
"if",
"self",
".",
"has_sent_prepare",
"(",
"ppReq",
")",
":",
"return",
"False",
",",
"'has already sent PREPARE for {}'",
".",
"format",
"(",
"ppReq",
")",
"return",
"True",
",",
"''"
] | python | Return whether the batch of requests in the PRE-PREPARE can
proceed to the PREPARE step.
:param ppReq: any object with identifier and requestId attributes | false |
2,685,079 | def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = getattr(obj, '__class__', None) or type(obj)
if getattr(klass, '__repr__', None) not in _baseclass_reprs:
# A user-provided repr.
p.text(repr(obj))
return
p.begin_group(1, '<')
p.pretty(klass)
p.text(' at 0x%x' % id(obj))
if cycle:
p.text(' ...')
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith('_'):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(',')
p.breakable()
p.text(key)
p.text('=')
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, '>') | [
"def",
"_default_pprint",
"(",
"obj",
",",
"p",
",",
"cycle",
")",
":",
"klass",
"=",
"getattr",
"(",
"obj",
",",
"'__class__'",
",",
"None",
")",
"or",
"type",
"(",
"obj",
")",
"if",
"getattr",
"(",
"klass",
",",
"'__repr__'",
",",
"None",
")",
"not",
"in",
"_baseclass_reprs",
":",
"p",
".",
"text",
"(",
"repr",
"(",
"obj",
")",
")",
"return",
"p",
".",
"begin_group",
"(",
"1",
",",
"'<'",
")",
"p",
".",
"pretty",
"(",
"klass",
")",
"p",
".",
"text",
"(",
"' at 0x%x'",
"%",
"id",
"(",
"obj",
")",
")",
"if",
"cycle",
":",
"p",
".",
"text",
"(",
"' ...'",
")",
"elif",
"p",
".",
"verbose",
":",
"first",
"=",
"True",
"for",
"key",
"in",
"dir",
"(",
"obj",
")",
":",
"if",
"not",
"key",
".",
"startswith",
"(",
"'_'",
")",
":",
"try",
":",
"value",
"=",
"getattr",
"(",
"obj",
",",
"key",
")",
"except",
"AttributeError",
":",
"continue",
"if",
"isinstance",
"(",
"value",
",",
"types",
".",
"MethodType",
")",
":",
"continue",
"if",
"not",
"first",
":",
"p",
".",
"text",
"(",
"','",
")",
"p",
".",
"breakable",
"(",
")",
"p",
".",
"text",
"(",
"key",
")",
"p",
".",
"text",
"(",
"'='",
")",
"step",
"=",
"len",
"(",
"key",
")",
"+",
"1",
"p",
".",
"indentation",
"+=",
"step",
"p",
".",
"pretty",
"(",
"value",
")",
"p",
".",
"indentation",
"-=",
"step",
"first",
"=",
"False",
"p",
".",
"end_group",
"(",
"1",
",",
"'>'",
")"
] | python | The default print function. Used if an object does not provide one and
it's none of the builtin objects. | false |
1,601,135 | def QA_util_id2date(idx, client):
"""
Query the trade date (通达信 / TDX calendar) from the database
:param idx: str -- index of the record in the database
:param client: pymongo.MongoClient -- MongoDB connection, obtained via QA_util_sql_mongo_setting
:return: str -- the TDX trade date stored in the database
"""
coll = client.quantaxis.trade_date
temp_str = coll.find_one({'num': idx})
return temp_str['date'] | [
"def",
"QA_util_id2date",
"(",
"idx",
",",
"client",
")",
":",
"coll",
"=",
"client",
".",
"quantaxis",
".",
"trade_date",
"temp_str",
"=",
"coll",
".",
"find_one",
"(",
"{",
"'num'",
":",
"idx",
"}",
")",
"return",
"temp_str",
"[",
"'date'",
"]"
] | python | Query the trade date (通达信 / TDX calendar) from the database
:param idx: str -- index of the record in the database
:param client: pymongo.MongoClient -- MongoDB connection, obtained via QA_util_sql_mongo_setting
:return: str -- the TDX trade date stored in the database | false |
2,286,291 | def get_isotope_dicts(element='', database='ENDF_VII'):
"""return a dictionary with list of isotopes found in database and name of database files
Parameters:
===========
element: string. Name of the element
ex: 'Ag'
database: string (default is ENDF_VII)
Returns:
========
dictionary with isotopes and files
ex: {'Ag': {'isotopes': ['107-Ag','109-Ag'],
'file_names': ['Ag-107.csv','Ag-109.csv']}}
"""
_file_path = os.path.abspath(os.path.dirname(__file__))
_database_folder = os.path.join(_file_path, 'reference_data', database)
_element_search_path = os.path.join(_database_folder, element + '-*.csv')
list_files = glob.glob(_element_search_path)
if not list_files:
raise ValueError("File names contains NO '-', the name should in the format of 'Cd-115_m1' or 'Cd-114'")
list_files.sort()
isotope_dict = {'isotopes': {'list': [],
'file_names': [],
'density': {'value': np.NaN,
'units': 'g/cm3'},
'mass': {'value': [],
'units': 'g/mol',
},
'isotopic_ratio': [], },
'density': {'value': np.NaN,
'units': 'g/cm3'},
'molar_mass': {'value': np.NaN,
'units': 'g/mol'},
}
# isotope_dict_mirror = {}
_isotopes_list = []
_isotopes_list_files = []
_isotopes_mass = []
_isotopes_density = []
_isotopes_atomic_ratio = []
_density = np.NaN
_molar_mass = np.NaN
for file in list_files:
# Obtain element, z number from the basename
_basename = os.path.basename(file)
filename = os.path.splitext(_basename)[0]
if '-' in filename:
[_name, _number] = filename.split('-')
if '_' in _number:
[aaa, meta] = _number.split('_')
_number = aaa[:]
else:
_split_list = re.split(r'(\d+)', filename)
if len(_split_list) == 2:
[_name, _number] = _split_list
else:
_name = _split_list[0]
_number = _split_list[1]
if _number == '0':
_number = '12'
_symbol = _number + '-' + _name
isotope = str(_symbol)
_isotopes_list.append(isotope)
_isotopes_list_files.append(_basename)
_isotopes_mass.append(get_mass(isotope))
_isotopes_atomic_ratio.append(get_abundance(isotope))
_isotopes_density.append(get_density(isotope))
_density = get_density(element)
_molar_mass = get_mass(element)
isotope_dict['isotopes']['list'] = _isotopes_list
isotope_dict['isotopes']['file_names'] = _isotopes_list_files
isotope_dict['isotopes']['mass']['value'] = _isotopes_mass
isotope_dict['isotopes']['isotopic_ratio'] = _isotopes_atomic_ratio
isotope_dict['isotopes']['density']['value'] = _isotopes_density
isotope_dict['density']['value'] = _density
isotope_dict['molar_mass']['value'] = _molar_mass
return isotope_dict | [
"def",
"get_isotope_dicts",
"(",
"element",
"=",
"''",
",",
"database",
"=",
"'ENDF_VII'",
")",
":",
"_file_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
"_database_folder",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_file_path",
",",
"'reference_data'",
",",
"database",
")",
"_element_search_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_database_folder",
",",
"element",
"+",
"'-*.csv'",
")",
"list_files",
"=",
"glob",
".",
"glob",
"(",
"_element_search_path",
")",
"if",
"not",
"list_files",
":",
"raise",
"ValueError",
"(",
"\"File names contains NO '-', the name should in the format of 'Cd-115_m1' or 'Cd-114'\"",
")",
"list_files",
".",
"sort",
"(",
")",
"isotope_dict",
"=",
"{",
"'isotopes'",
":",
"{",
"'list'",
":",
"[",
"]",
",",
"'file_names'",
":",
"[",
"]",
",",
"'density'",
":",
"{",
"'value'",
":",
"np",
".",
"NaN",
",",
"'units'",
":",
"'g/cm3'",
"}",
",",
"'mass'",
":",
"{",
"'value'",
":",
"[",
"]",
",",
"'units'",
":",
"'g/mol'",
",",
"}",
",",
"'isotopic_ratio'",
":",
"[",
"]",
",",
"}",
",",
"'density'",
":",
"{",
"'value'",
":",
"np",
".",
"NaN",
",",
"'units'",
":",
"'g/cm3'",
"}",
",",
"'molar_mass'",
":",
"{",
"'value'",
":",
"np",
".",
"NaN",
",",
"'units'",
":",
"'g/mol'",
"}",
",",
"}",
"_isotopes_list",
"=",
"[",
"]",
"_isotopes_list_files",
"=",
"[",
"]",
"_isotopes_mass",
"=",
"[",
"]",
"_isotopes_density",
"=",
"[",
"]",
"_isotopes_atomic_ratio",
"=",
"[",
"]",
"_density",
"=",
"np",
".",
"NaN",
"_molar_mass",
"=",
"np",
".",
"NaN",
"for",
"file",
"in",
"list_files",
":",
"_basename",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"file",
")",
"filename",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"_basename",
")",
"[",
"0",
"]",
"if",
"'-'",
"in",
"filename",
":",
"[",
"_name",
",",
"_number",
"]",
"=",
"filename",
".",
"split",
"(",
"'-'",
")",
"if",
"'_'",
"in",
"_number",
":",
"[",
"aaa",
",",
"meta",
"]",
"=",
"_number",
".",
"split",
"(",
"'_'",
")",
"_number",
"=",
"aaa",
"[",
":",
"]",
"else",
":",
"_split_list",
"=",
"re",
".",
"split",
"(",
"r'(\\d+)'",
",",
"filename",
")",
"if",
"len",
"(",
"_split_list",
")",
"==",
"2",
":",
"[",
"_name",
",",
"_number",
"]",
"=",
"_split_list",
"else",
":",
"_name",
"=",
"_split_list",
"[",
"0",
"]",
"_number",
"=",
"_split_list",
"[",
"1",
"]",
"if",
"_number",
"==",
"'0'",
":",
"_number",
"=",
"'12'",
"_symbol",
"=",
"_number",
"+",
"'-'",
"+",
"_name",
"isotope",
"=",
"str",
"(",
"_symbol",
")",
"_isotopes_list",
".",
"append",
"(",
"isotope",
")",
"_isotopes_list_files",
".",
"append",
"(",
"_basename",
")",
"_isotopes_mass",
".",
"append",
"(",
"get_mass",
"(",
"isotope",
")",
")",
"_isotopes_atomic_ratio",
".",
"append",
"(",
"get_abundance",
"(",
"isotope",
")",
")",
"_isotopes_density",
".",
"append",
"(",
"get_density",
"(",
"isotope",
")",
")",
"_density",
"=",
"get_density",
"(",
"element",
")",
"_molar_mass",
"=",
"get_mass",
"(",
"element",
")",
"isotope_dict",
"[",
"'isotopes'",
"]",
"[",
"'list'",
"]",
"=",
"_isotopes_list",
"isotope_dict",
"[",
"'isotopes'",
"]",
"[",
"'file_names'",
"]",
"=",
"_isotopes_list_files",
"isotope_dict",
"[",
"'isotopes'",
"]",
"[",
"'mass'",
"]",
"[",
"'value'",
"]",
"=",
"_isotopes_mass",
"isotope_dict",
"[",
"'isotopes'",
"]",
"[",
"'isotopic_ratio'",
"]",
"=",
"_isotopes_atomic_ratio",
"isotope_dict",
"[",
"'isotopes'",
"]",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
"=",
"_isotopes_density",
"isotope_dict",
"[",
"'density'",
"]",
"[",
"'value'",
"]",
"=",
"_density",
"isotope_dict",
"[",
"'molar_mass'",
"]",
"[",
"'value'",
"]",
"=",
"_molar_mass",
"return",
"isotope_dict"
] | python | return a dictionary with list of isotopes found in database and name of database files
Parameters:
===========
element: string. Name of the element
ex: 'Ag'
database: string (default is ENDF_VII)
Returns:
========
dictionary with isotopes and files
ex: {'Ag': {'isotopes': ['107-Ag','109-Ag'],
'file_names': ['Ag-107.csv','Ag-109.csv']}} | false |
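The filename parsing has two branches, and the regex one relies on `re.split` keeping the digits it captured. A stripped-down check over the three filename shapes the record handles:

```python
import re

for filename in ('Ag-107', 'Cd-115_m1', 'Ag107'):
    if '-' in filename:
        name, number = filename.split('-')
        if '_' in number:
            number, _meta = number.split('_')   # drop the metastable suffix
    else:
        # A capturing group makes re.split keep the digits:
        # 'Ag107' -> ['Ag', '107', '']
        parts = re.split(r'(\d+)', filename)
        name, number = parts[0], parts[1]
    print(filename, '->', number + '-' + name)
# Ag-107    -> 107-Ag
# Cd-115_m1 -> 115-Cd
# Ag107     -> 107-Ag
```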
2,149,648 | def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Operation(key)
if key not in Operation._member_map_:
extend_enum(Operation, key, default)
return Operation[key] | [
"def",
"get",
"(",
"key",
",",
"default",
"=",
"-",
"1",
")",
":",
"if",
"isinstance",
"(",
"key",
",",
"int",
")",
":",
"return",
"Operation",
"(",
"key",
")",
"if",
"key",
"not",
"in",
"Operation",
".",
"_member_map_",
":",
"extend_enum",
"(",
"Operation",
",",
"key",
",",
"default",
")",
"return",
"Operation",
"[",
"key",
"]"
] | python | Backport support for original codes. | false |
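The pattern registers unknown keys on the enum at runtime. `extend_enum` here presumably comes from the aenum package; a sketch with a toy `Operation` enum (the real member values are not shown in the record):

```python
from aenum import IntEnum, extend_enum   # assuming the aenum package

class Operation(IntEnum):
    QUERY = 0
    STATUS = 2

def get(key, default=-1):
    if isinstance(key, int):
        return Operation(key)
    if key not in Operation.__members__:
        extend_enum(Operation, key, default)   # add the unknown key on the fly
    return Operation[key]

print(get(2).name)      # STATUS
print(get('NOTIFY'))    # Operation.NOTIFY, value -1, created on demand
```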
2,363,973 | def parse_get_list_response(content):
"""Parses of response content XML from WebDAV server and extract file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names.
"""
try:
tree = etree.fromstring(content)
hrees = [Urn.separate + unquote(urlsplit(hree.text).path) for hree in tree.findall('.//{DAV:}href')]
return [Urn(hree) for hree in hrees]
except etree.XMLSyntaxError:
return list() | [
"def",
"parse_get_list_response",
"(",
"content",
")",
":",
"try",
":",
"tree",
"=",
"etree",
".",
"fromstring",
"(",
"content",
")",
"hrees",
"=",
"[",
"Urn",
".",
"separate",
"+",
"unquote",
"(",
"urlsplit",
"(",
"hree",
".",
"text",
")",
".",
"path",
")",
"for",
"hree",
"in",
"tree",
".",
"findall",
"(",
"'.//{DAV:}href'",
")",
"]",
"return",
"[",
"Urn",
"(",
"hree",
")",
"for",
"hree",
"in",
"hrees",
"]",
"except",
"etree",
".",
"XMLSyntaxError",
":",
"return",
"list",
"(",
")"
] | python | Parses of response content XML from WebDAV server and extract file and directory names.
:param content: the XML content of HTTP response from WebDAV server for getting list of files by remote path.
:return: list of extracted file or directory names. | false |
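The `{DAV:}` Clark notation matches elements in the DAV namespace regardless of prefix. A runnable check against a minimal multistatus body (paths invented; the `Urn` wrapping is webdav-client-specific and omitted):

```python
from lxml import etree
from urllib.parse import unquote, urlsplit

content = b'''<?xml version="1.0"?>
<d:multistatus xmlns:d="DAV:">
  <d:response><d:href>/dav/files/demo/</d:href></d:response>
  <d:response><d:href>/dav/files/demo/report%202024.pdf</d:href></d:response>
</d:multistatus>'''

tree = etree.fromstring(content)
hrefs = [unquote(urlsplit(h.text).path) for h in tree.findall('.//{DAV:}href')]
print(hrefs)  # ['/dav/files/demo/', '/dav/files/demo/report 2024.pdf']
```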
2,433,759 | def convertDict2Attrs(self, *args, **kwargs):
"""The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Transaction object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuTransaction just
created.
You send a None urlfunc and entid to each MambuTransaction,
because there's no method to retrieve individual transactions
from Mambu.
.. todo:: use the transactionid as a possible entid here.
"""
for n,t in enumerate(self.attrs):
# ok ok, I'm modifying elements of a list while iterating it. BAD PRACTICE!
try:
params = self.params
except AttributeError as aerr:
params = {}
kwargs.update(params)
try:
trans = self.mambutransactionclass(urlfunc=None, entid=None, *args, **kwargs)
except AttributeError as ae:
self.mambutransactionclass = MambuTransaction
trans = self.mambutransactionclass(urlfunc=None, entid=None, *args, **kwargs)
trans.init(t, *args, **kwargs)
self.attrs[n] = trans | [
"def",
"convertDict2Attrs",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"for",
"n",
",",
"t",
"in",
"enumerate",
"(",
"self",
".",
"attrs",
")",
":",
"try",
":",
"params",
"=",
"self",
".",
"params",
"except",
"AttributeError",
"as",
"aerr",
":",
"params",
"=",
"{",
"}",
"kwargs",
".",
"update",
"(",
"params",
")",
"try",
":",
"trans",
"=",
"self",
".",
"mambutransactionclass",
"(",
"urlfunc",
"=",
"None",
",",
"entid",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"except",
"AttributeError",
"as",
"ae",
":",
"self",
".",
"mambutransactionclass",
"=",
"MambuTransaction",
"trans",
"=",
"self",
".",
"mambutransactionclass",
"(",
"urlfunc",
"=",
"None",
",",
"entid",
"=",
"None",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"trans",
".",
"init",
"(",
"t",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"self",
".",
"attrs",
"[",
"n",
"]",
"=",
"trans"
] | python | The trick for iterable Mambu Objects comes here:
You iterate over each element of the responded List from Mambu,
and create a Mambu Transaction object for each one, initializing
them one at a time, and changing the attrs attribute (which just
holds a list of plain dictionaries) with a MambuTransaction just
created.
You send a None urlfunc and entid to each MambuTransaction,
because there's no method to retrieve individual transactions
from Mambu.
.. todo:: use the transactionid as a possible entid here. | false |
1,995,739 | def _prepare_atoms(topology, compute_cycles=False):
"""Compute cycles and add white-/blacklists to atoms."""
atom1 = next(topology.atoms())
has_whitelists = hasattr(atom1, 'whitelist')
has_cycles = hasattr(atom1, 'cycles')
compute_cycles = compute_cycles and not has_cycles
if compute_cycles or not has_whitelists:
for atom in topology.atoms():
if compute_cycles:
atom.cycles = set()
if not has_whitelists:
atom.whitelist = OrderedSet()
atom.blacklist = OrderedSet()
if compute_cycles:
bond_graph = nx.Graph()
bond_graph.add_nodes_from(topology.atoms())
bond_graph.add_edges_from(topology.bonds())
all_cycles = _find_chordless_cycles(bond_graph, max_cycle_size=8)
for atom, cycles in zip(bond_graph.nodes, all_cycles):
for cycle in cycles:
atom.cycles.add(tuple(cycle)) | [
"def",
"_prepare_atoms",
"(",
"topology",
",",
"compute_cycles",
"=",
"False",
")",
":",
"atom1",
"=",
"next",
"(",
"topology",
".",
"atoms",
"(",
")",
")",
"has_whitelists",
"=",
"hasattr",
"(",
"atom1",
",",
"'whitelist'",
")",
"has_cycles",
"=",
"hasattr",
"(",
"atom1",
",",
"'cycles'",
")",
"compute_cycles",
"=",
"compute_cycles",
"and",
"not",
"has_cycles",
"if",
"compute_cycles",
"or",
"not",
"has_whitelists",
":",
"for",
"atom",
"in",
"topology",
".",
"atoms",
"(",
")",
":",
"if",
"compute_cycles",
":",
"atom",
".",
"cycles",
"=",
"set",
"(",
")",
"if",
"not",
"has_whitelists",
":",
"atom",
".",
"whitelist",
"=",
"OrderedSet",
"(",
")",
"atom",
".",
"blacklist",
"=",
"OrderedSet",
"(",
")",
"if",
"compute_cycles",
":",
"bond_graph",
"=",
"nx",
".",
"Graph",
"(",
")",
"bond_graph",
".",
"add_nodes_from",
"(",
"topology",
".",
"atoms",
"(",
")",
")",
"bond_graph",
".",
"add_edges_from",
"(",
"topology",
".",
"bonds",
"(",
")",
")",
"all_cycles",
"=",
"_find_chordless_cycles",
"(",
"bond_graph",
",",
"max_cycle_size",
"=",
"8",
")",
"for",
"atom",
",",
"cycles",
"in",
"zip",
"(",
"bond_graph",
".",
"nodes",
",",
"all_cycles",
")",
":",
"for",
"cycle",
"in",
"cycles",
":",
"atom",
".",
"cycles",
".",
"add",
"(",
"tuple",
"(",
"cycle",
")",
")"
] | python | Compute cycles and add white-/blacklists to atoms. | false |
2,173,889 | def _find_bgzip():
"""return path to bgzip if found and meets version requirements, else exception"""
missing_file_exception = OSError if six.PY2 else FileNotFoundError
min_bgzip_version = ".".join(map(str, min_bgzip_version_info))
exe = os.environ.get("SEQREPO_BGZIP_PATH", which("bgzip") or "/usr/bin/bgzip")
try:
bgzip_version = _get_bgzip_version(exe)
except AttributeError:
raise RuntimeError("Didn't find version string in bgzip executable ({exe})".format(exe=exe))
except missing_file_exception:
raise RuntimeError("{exe} doesn't exist; you need to install htslib (See https://github.com/biocommons/biocommons.seqrepo#requirements)".format(exe=exe))
except Exception:
raise RuntimeError("Unknown error while executing {exe}".format(exe=exe))
bgzip_version_info = tuple(map(int, bgzip_version.split(".")))
if bgzip_version_info < min_bgzip_version_info:
raise RuntimeError("bgzip ({exe}) {ev} is too old; >= {rv} is required; please upgrade".format(
exe=exe, ev=bgzip_version, rv=min_bgzip_version))
logger.info("Using bgzip {ev} ({exe})".format(ev=bgzip_version, exe=exe))
return exe | [
"def",
"_find_bgzip",
"(",
")",
":",
"missing_file_exception",
"=",
"OSError",
"if",
"six",
".",
"PY2",
"else",
"FileNotFoundError",
"min_bgzip_version",
"=",
"\".\"",
".",
"join",
"(",
"map",
"(",
"str",
",",
"min_bgzip_version_info",
")",
")",
"exe",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"SEQREPO_BGZIP_PATH\"",
",",
"which",
"(",
"\"bgzip\"",
")",
"or",
"\"/usr/bin/bgzip\"",
")",
"try",
":",
"bgzip_version",
"=",
"_get_bgzip_version",
"(",
"exe",
")",
"except",
"AttributeError",
":",
"raise",
"RuntimeError",
"(",
"\"Didn't find version string in bgzip executable ({exe})\"",
".",
"format",
"(",
"exe",
"=",
"exe",
")",
")",
"except",
"missing_file_exception",
":",
"raise",
"RuntimeError",
"(",
"\"{exe} doesn't exist; you need to install htslib (See https://github.com/biocommons/biocommons.seqrepo#requirements)\"",
".",
"format",
"(",
"exe",
"=",
"exe",
")",
")",
"except",
"Exception",
":",
"raise",
"RuntimeError",
"(",
"\"Unknown error while executing {exe}\"",
".",
"format",
"(",
"exe",
"=",
"exe",
")",
")",
"bgzip_version_info",
"=",
"tuple",
"(",
"map",
"(",
"int",
",",
"bgzip_version",
".",
"split",
"(",
"\".\"",
")",
")",
")",
"if",
"bgzip_version_info",
"<",
"min_bgzip_version_info",
":",
"raise",
"RuntimeError",
"(",
"\"bgzip ({exe}) {ev} is too old; >= {rv} is required; please upgrade\"",
".",
"format",
"(",
"exe",
"=",
"exe",
",",
"ev",
"=",
"bgzip_version",
",",
"rv",
"=",
"min_bgzip_version",
")",
")",
"logger",
".",
"info",
"(",
"\"Using bgzip {ev} ({exe})\"",
".",
"format",
"(",
"ev",
"=",
"bgzip_version",
",",
"exe",
"=",
"exe",
")",
")",
"return",
"exe"
] | python | return path to bgzip if found and meets version requirements, else exception | false |
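The version gate compares integer tuples rather than strings, which matters once components reach two digits:

```python
min_bgzip_version_info = (1, 2, 1)

for found in ('1.2.1', '1.10', '0.9'):
    info = tuple(map(int, found.split('.')))
    print(found, '->', 'ok' if info >= min_bgzip_version_info else 'too old')
# 1.2.1 -> ok
# 1.10  -> ok   (as strings, '1.10' < '1.2.1' would wrongly fail)
# 0.9   -> too old
```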
1,619,809 | def approxEqual(x, y, *args, **kwargs):
"""approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approxEqual__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approxEqual__ method.
__approxEqual__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object have the method, or both defer by
returning NotImplemented, approxEqual falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False
"""
if not (type(x) is type(y) is float):
# Skip checking for __approxEqual__ in the common case of two floats.
methodname = '__approxEqual__'
# Allow the objects to specify what they consider "approximately equal",
# giving precedence to x. If either object has the appropriate method, we
# pass on any optional arguments untouched.
for a,b in ((x, y), (y, x)):
try:
method = getattr(a, methodname)
except AttributeError:
continue
else:
result = method(b, *args, **kwargs)
if result is NotImplemented:
print "WARNING: NotImplemented approxEqual for types"
continue
return bool(result)
# If we get here without returning, then neither x nor y knows how to do an
# approximate equal comparison (or are both floats). Fall back to a numeric
# comparison.
return _float_approxEqual(x, y, *args, **kwargs) | [
"def",
"approxEqual",
"(",
"x",
",",
"y",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"not",
"(",
"type",
"(",
"x",
")",
"is",
"type",
"(",
"y",
")",
"is",
"float",
")",
":",
"methodname",
"=",
"'__approxEqual__'",
"for",
"a",
",",
"b",
"in",
"(",
"(",
"x",
",",
"y",
")",
",",
"(",
"y",
",",
"x",
")",
")",
":",
"try",
":",
"method",
"=",
"getattr",
"(",
"a",
",",
"methodname",
")",
"except",
"AttributeError",
":",
"continue",
"else",
":",
"result",
"=",
"method",
"(",
"b",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"if",
"result",
"is",
"NotImplemented",
":",
"print",
"\"WARNING: NotImplemented approxEqual for types\"",
"continue",
"return",
"bool",
"(",
"result",
")",
"return",
"_float_approxEqual",
"(",
"x",
",",
"y",
",",
"*",
"args",
",",
"**",
"kwargs",
")"
] | python | approxEqual(float1, float2[, tol=1e-18, rel=1e-7]) -> True|False
approxEqual(obj1, obj2[, *args, **kwargs]) -> True|False
Return True if x and y are approximately equal, otherwise False.
If x and y are floats, return True if y is within either absolute error
tol or relative error rel of x. You can disable either the absolute or
relative check by passing None as tol or rel (but not both).
For any other objects, x and y are checked in that order for a method
__approxEqual__, and the result of that is returned as a bool. Any
optional arguments are passed to the __approxEqual__ method.
__approxEqual__ can return NotImplemented to signal that it doesn't know
how to perform that specific comparison, in which case the other object is
checked instead. If neither object have the method, or both defer by
returning NotImplemented, approxEqual falls back on the same numeric
comparison used for floats.
>>> almost_equal(1.2345678, 1.2345677)
True
>>> almost_equal(1.234, 1.235)
False | false |
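`_float_approxEqual` is referenced but not shown in the record. A common implementation of the described semantics (pass when within absolute tolerance `tol` or relative tolerance `rel`, either disabled with None) would look like this sketch:

```python
def _float_approx_equal(x, y, tol=1e-18, rel=1e-7):
    # Passes if |x - y| is within the absolute OR the relative tolerance;
    # either check may be disabled by passing None, but not both.
    if tol is None and rel is None:
        raise TypeError('cannot disable both absolute and relative tolerance')
    tests = []
    if tol is not None:
        tests.append(tol)
    if rel is not None:
        tests.append(rel * abs(x))
    return abs(x - y) <= max(tests)

print(_float_approx_equal(1.2345678, 1.2345677))  # True
print(_float_approx_equal(1.234, 1.235))          # False
```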
2,161,801 | def patch_cmdline_parser():
"""
Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`.
"""
# store original functions
_init = luigi.cmdline_parser.CmdlineParser.__init__
# patch init
def __init__(self, cmdline_args):
_init(self, cmdline_args)
self.cmdline_args = cmdline_args
luigi.cmdline_parser.CmdlineParser.__init__ = __init__ | [
"def",
"patch_cmdline_parser",
"(",
")",
":",
"_init",
"=",
"luigi",
".",
"cmdline_parser",
".",
"CmdlineParser",
".",
"__init__",
"def",
"__init__",
"(",
"self",
",",
"cmdline_args",
")",
":",
"_init",
"(",
"self",
",",
"cmdline_args",
")",
"self",
".",
"cmdline_args",
"=",
"cmdline_args",
"luigi",
".",
"cmdline_parser",
".",
"CmdlineParser",
".",
"__init__",
"=",
"__init__"
] | python | Patches the ``luigi.cmdline_parser.CmdlineParser`` to store the original command line arguments
for later processing in the :py:class:`law.config.Config`. | false |
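The patch wraps `__init__` while keeping a reference to the original, then reassigns it on the class. The same shape with a toy parser in place of luigi's:

```python
class CmdlineParser:
    def __init__(self, cmdline_args):
        self.known_args = list(cmdline_args)

_init = CmdlineParser.__init__   # keep a handle on the original

def __init__(self, cmdline_args):
    _init(self, cmdline_args)
    self.cmdline_args = cmdline_args   # stash the raw argv for later use

CmdlineParser.__init__ = __init__

p = CmdlineParser(['MyTask', '--workers', '4'])
print(p.cmdline_args)  # ['MyTask', '--workers', '4']
```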
2,641,223 | def _parse_xml(self, xml):
"""Extracts objects representing and interacting with the settings in the
xml tag.
"""
vms("Parsing <testing> XML child tag.", 2)
self.timeout = get_attrib(xml, "timeout", cast=int)
for child in xml:
if child.tag == "command":
self.tests.append({"command": child.text, "end": None,
"success": False, "code": None,
"start": None, "result": None}) | [
"def",
"_parse_xml",
"(",
"self",
",",
"xml",
")",
":",
"vms",
"(",
"\"Parsing <testing> XML child tag.\"",
",",
"2",
")",
"self",
".",
"timeout",
"=",
"get_attrib",
"(",
"xml",
",",
"\"timeout\"",
",",
"cast",
"=",
"int",
")",
"for",
"child",
"in",
"xml",
":",
"if",
"child",
".",
"tag",
"==",
"\"command\"",
":",
"self",
".",
"tests",
".",
"append",
"(",
"{",
"\"command\"",
":",
"child",
".",
"text",
",",
"\"end\"",
":",
"None",
",",
"\"success\"",
":",
"False",
",",
"\"code\"",
":",
"None",
",",
"\"start\"",
":",
"None",
",",
"\"result\"",
":",
"None",
"}",
")"
] | python | Extracts objects representing and interacting with the settings in the
xml tag. | false |
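`vms` and `get_attrib` are helpers from the surrounding project; stdlib ElementTree reproduces the same walk over `<testing>`:

```python
import xml.etree.ElementTree as ET

xml = ET.fromstring(
    '<testing timeout="120">'
    '<command>make unit</command>'
    '<command>make bench</command>'
    '</testing>'
)

timeout = int(xml.get('timeout'))   # get_attrib(xml, "timeout", cast=int)
tests = [{'command': child.text, 'end': None, 'success': False,
          'code': None, 'start': None, 'result': None}
         for child in xml if child.tag == 'command']
print(timeout, [t['command'] for t in tests])  # 120 ['make unit', 'make bench']
```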
2,610,847 | def svg2png(svg_file_path, png_file_path, dpi=150, inkscape_binpath=None):
""" Transform SVG file to PNG file
"""
return inkscape_export(svg_file_path, png_file_path, export_flag="-e",
dpi=dpi, inkscape_binpath=inkscape_binpath) | [
"def",
"svg2png",
"(",
"svg_file_path",
",",
"png_file_path",
",",
"dpi",
"=",
"150",
",",
"inkscape_binpath",
"=",
"None",
")",
":",
"return",
"inkscape_export",
"(",
"svg_file_path",
",",
"png_file_path",
",",
"export_flag",
"=",
"\"-e\"",
",",
"dpi",
"=",
"dpi",
",",
"inkscape_binpath",
"=",
"inkscape_binpath",
")"
] | python | Transform SVG file to PNG file | false |
2,088,034 | def process(self):
"""Periodic nonblocking processes"""
super(NativeBLEVirtualInterface, self).process()
if (not self._stream_sm_running) and (not self.reports.empty()):
self._stream_data()
if (not self._trace_sm_running) and (not self.traces.empty()):
self._send_trace() | [
"def",
"process",
"(",
"self",
")",
":",
"super",
"(",
"NativeBLEVirtualInterface",
",",
"self",
")",
".",
"process",
"(",
")",
"if",
"(",
"not",
"self",
".",
"_stream_sm_running",
")",
"and",
"(",
"not",
"self",
".",
"reports",
".",
"empty",
"(",
")",
")",
":",
"self",
".",
"_stream_data",
"(",
")",
"if",
"(",
"not",
"self",
".",
"_trace_sm_running",
")",
"and",
"(",
"not",
"self",
".",
"traces",
".",
"empty",
"(",
")",
")",
":",
"self",
".",
"_send_trace",
"(",
")"
] | python | Periodic nonblocking processes | false |
2,186,486 | def transport_jsonrpc(self):
"""
Installs the JSON-RPC transport bundles and instantiates components
"""
# Install the bundle
self.context.install_bundle("pelix.remote.json_rpc").start()
with use_waiting_list(self.context) as ipopo:
# Instantiate the discovery
ipopo.add(
rs.FACTORY_TRANSPORT_JSONRPC_EXPORTER, "pelix-jsonrpc-exporter"
)
ipopo.add(
rs.FACTORY_TRANSPORT_JSONRPC_IMPORTER, "pelix-jsonrpc-importer"
) | [
"def",
"transport_jsonrpc",
"(",
"self",
")",
":",
"self",
".",
"context",
".",
"install_bundle",
"(",
"\"pelix.remote.json_rpc\"",
")",
".",
"start",
"(",
")",
"with",
"use_waiting_list",
"(",
"self",
".",
"context",
")",
"as",
"ipopo",
":",
"ipopo",
".",
"add",
"(",
"rs",
".",
"FACTORY_TRANSPORT_JSONRPC_EXPORTER",
",",
"\"pelix-jsonrpc-exporter\"",
")",
"ipopo",
".",
"add",
"(",
"rs",
".",
"FACTORY_TRANSPORT_JSONRPC_IMPORTER",
",",
"\"pelix-jsonrpc-importer\"",
")"
] | python | Installs the JSON-RPC transport bundles and instantiates components | false |
2,421,113 | def path_from_keywords(keywords,into='path'):
'''
turns keyword pairs into path or filename
if `into=='path'`, then keywords are separted by underscores, else keywords are used to create a directory hierarchy
'''
subdirs = []
def prepare_string(s):
s = str(s)
s = re.sub('[][{},*"'+f"'{os.sep}]",'_',s)#replace characters that make bash life difficult by underscore
if into=='file':
s = s.replace('_', ' ')#Remove underscore because they will be used as separator
if ' ' in s:
s = s.title()
s = s.replace(' ','')
return s
if isinstance(keywords,set):
keywords_list = sorted(keywords)
for property in keywords_list:
subdirs.append(prepare_string(property))
else:
keywords_list = sorted(keywords.items())
for property,value in keywords_list: # @reservedassignment
if Bool.valid(value):
subdirs.append(('' if value else ('not_' if into=='path' else 'not'))+prepare_string(property))
#elif String.valid(value):
# subdirs.append(prepare_string(value))
elif (Float|Integer).valid(value):
subdirs.append('{}{}'.format(prepare_string(property),prepare_string(value)))
else:
subdirs.append('{}{}{}'.format(prepare_string(property),'_' if into == 'path' else '',prepare_string(value)))
if into == 'path':
out = os.path.join(*subdirs)
else:
out = '_'.join(subdirs)
return out | [
"def",
"path_from_keywords",
"(",
"keywords",
",",
"into",
"=",
"'path'",
")",
":",
"subdirs",
"=",
"[",
"]",
"def",
"prepare_string",
"(",
"s",
")",
":",
"s",
"=",
"str",
"(",
"s",
")",
"s",
"=",
"re",
".",
"sub",
"(",
"'[][{},*\"'",
"+",
"f\"'{os.sep}]\"",
",",
"'_'",
",",
"s",
")",
"if",
"into",
"==",
"'file'",
":",
"s",
"=",
"s",
".",
"replace",
"(",
"'_'",
",",
"' '",
")",
"if",
"' '",
"in",
"s",
":",
"s",
"=",
"s",
".",
"title",
"(",
")",
"s",
"=",
"s",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"return",
"s",
"if",
"isinstance",
"(",
"keywords",
",",
"set",
")",
":",
"keywords_list",
"=",
"sorted",
"(",
"keywords",
")",
"for",
"property",
"in",
"keywords_list",
":",
"subdirs",
".",
"append",
"(",
"prepare_string",
"(",
"property",
")",
")",
"else",
":",
"keywords_list",
"=",
"sorted",
"(",
"keywords",
".",
"items",
"(",
")",
")",
"for",
"property",
",",
"value",
"in",
"keywords_list",
":",
"if",
"Bool",
".",
"valid",
"(",
"value",
")",
":",
"subdirs",
".",
"append",
"(",
"(",
"''",
"if",
"value",
"else",
"(",
"'not_'",
"if",
"into",
"==",
"'path'",
"else",
"'not'",
")",
")",
"+",
"prepare_string",
"(",
"property",
")",
")",
"elif",
"(",
"Float",
"|",
"Integer",
")",
".",
"valid",
"(",
"value",
")",
":",
"subdirs",
".",
"append",
"(",
"'{}{}'",
".",
"format",
"(",
"prepare_string",
"(",
"property",
")",
",",
"prepare_string",
"(",
"value",
")",
")",
")",
"else",
":",
"subdirs",
".",
"append",
"(",
"'{}{}{}'",
".",
"format",
"(",
"prepare_string",
"(",
"property",
")",
",",
"'_'",
"if",
"into",
"==",
"'path'",
"else",
"''",
",",
"prepare_string",
"(",
"value",
")",
")",
")",
"if",
"into",
"==",
"'path'",
":",
"out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"*",
"subdirs",
")",
"else",
":",
"out",
"=",
"'_'",
".",
"join",
"(",
"subdirs",
")",
"return",
"out"
] | python | turns keyword pairs into a path or a filename
if `into=='path'`, keywords become a directory hierarchy; otherwise they are joined by underscores into a filename | false |
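`Bool`, `Float`, and `Integer` are validators from the surrounding project, but the sanitizer itself is plain `re`; note the character class starts with `][` so both brackets are matched literally. A standalone sketch of `prepare_string`:

```python
import os, re

def prepare_string(s, into='path'):
    # ']' first and '[' second keeps both brackets literal in the class.
    s = re.sub('[][{},*"' + f"'{os.sep}]", '_', str(s))
    if into == 'file':
        s = s.replace('_', ' ')
        if ' ' in s:
            s = s.title().replace(' ', '')
    return s

print(prepare_string('run {alpha: 0.5}'))           # run _alpha: 0.5_
print(prepare_string('run {alpha: 0.5}', 'file'))   # RunAlpha:0.5
```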
1,599,466 | def winsorize(row, min_percentile, max_percentile):
"""
This implementation is based on scipy.stats.mstats.winsorize
"""
a = row.copy()
nan_count = isnan(row).sum()
nonnan_count = a.size - nan_count
# NOTE: argsort() sorts nans to the end of the array.
idx = a.argsort()
# Set values at indices below the min percentile to the value of the entry
# at the cutoff.
if min_percentile > 0:
lower_cutoff = int(min_percentile * nonnan_count)
a[idx[:lower_cutoff]] = a[idx[lower_cutoff]]
# Set values at indices above the max percentile to the value of the entry
# at the cutoff.
if max_percentile < 1:
upper_cutoff = int(ceil(nonnan_count * max_percentile))
# if max_percentile is close to 1, then upper_cutoff might not
# remove any values.
if upper_cutoff < nonnan_count:
start_of_nans = (-nan_count) if nan_count else None
a[idx[upper_cutoff:start_of_nans]] = a[idx[upper_cutoff - 1]]
return a | [
"def",
"winsorize",
"(",
"row",
",",
"min_percentile",
",",
"max_percentile",
")",
":",
"a",
"=",
"row",
".",
"copy",
"(",
")",
"nan_count",
"=",
"isnan",
"(",
"row",
")",
".",
"sum",
"(",
")",
"nonnan_count",
"=",
"a",
".",
"size",
"-",
"nan_count",
"idx",
"=",
"a",
".",
"argsort",
"(",
")",
"if",
"min_percentile",
">",
"0",
":",
"lower_cutoff",
"=",
"int",
"(",
"min_percentile",
"*",
"nonnan_count",
")",
"a",
"[",
"idx",
"[",
":",
"lower_cutoff",
"]",
"]",
"=",
"a",
"[",
"idx",
"[",
"lower_cutoff",
"]",
"]",
"if",
"max_percentile",
"<",
"1",
":",
"upper_cutoff",
"=",
"int",
"(",
"ceil",
"(",
"nonnan_count",
"*",
"max_percentile",
")",
")",
"if",
"upper_cutoff",
"<",
"nonnan_count",
":",
"start_of_nans",
"=",
"(",
"-",
"nan_count",
")",
"if",
"nan_count",
"else",
"None",
"a",
"[",
"idx",
"[",
"upper_cutoff",
":",
"start_of_nans",
"]",
"]",
"=",
"a",
"[",
"idx",
"[",
"upper_cutoff",
"-",
"1",
"]",
"]",
"return",
"a"
] | python | This implementation is based on scipy.stats.mstats.winsorize | false |
1,636,680 | def create_empty_crl(
ca_name,
cacert_path=None,
ca_filename=None,
crl_file=None,
digest='sha256'):
'''
Create an empty Certificate Revocation List.
.. versionadded:: 2015.8.0
ca_name
name of the CA
cacert_path
absolute path to ca certificates root directory
ca_filename
alternative filename for the CA
.. versionadded:: 2015.5.3
crl_file
full path to the CRL file
digest
The message digest algorithm. Must be a string describing a digest
algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
For example, "md5" or "sha1". Default: 'sha256'
CLI Example:
.. code-block:: bash
salt '*' tls.create_empty_crl ca_name='koji' \
ca_filename='ca' \
crl_file='/etc/openvpn/team1/crl.pem'
'''
set_ca_path(cacert_path)
if not ca_filename:
ca_filename = '{0}_ca_cert'.format(ca_name)
if not crl_file:
crl_file = '{0}/{1}/crl.pem'.format(
_cert_base_path(),
ca_name
)
if os.path.exists('{0}'.format(crl_file)):
return 'CRL "{0}" already exists'.format(crl_file)
try:
with salt.utils.files.fopen('{0}/{1}/{2}.crt'.format(
cert_base_path(),
ca_name,
ca_filename)) as fp_:
ca_cert = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM,
fp_.read()
)
with salt.utils.files.fopen('{0}/{1}/{2}.key'.format(
cert_base_path(),
ca_name,
ca_filename)) as fp_:
ca_key = OpenSSL.crypto.load_privatekey(
OpenSSL.crypto.FILETYPE_PEM,
fp_.read()
)
except IOError:
return 'There is no CA named "{0}"'.format(ca_name)
crl = OpenSSL.crypto.CRL()
crl_text = crl.export(
ca_cert,
ca_key,
digest=salt.utils.stringutils.to_bytes(digest),
)
with salt.utils.files.fopen(crl_file, 'w') as f:
f.write(salt.utils.stringutils.to_str(crl_text))
return 'Created an empty CRL: "{0}"'.format(crl_file) | [
"def",
"create_empty_crl",
"(",
"ca_name",
",",
"cacert_path",
"=",
"None",
",",
"ca_filename",
"=",
"None",
",",
"crl_file",
"=",
"None",
",",
"digest",
"=",
"'sha256'",
")",
":",
"set_ca_path",
"(",
"cacert_path",
")",
"if",
"not",
"ca_filename",
":",
"ca_filename",
"=",
"'{0}_ca_cert'",
".",
"format",
"(",
"ca_name",
")",
"if",
"not",
"crl_file",
":",
"crl_file",
"=",
"'{0}/{1}/crl.pem'",
".",
"format",
"(",
"_cert_base_path",
"(",
")",
",",
"ca_name",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"'{0}'",
".",
"format",
"(",
"crl_file",
")",
")",
":",
"return",
"'CRL \"{0}\" already exists'",
".",
"format",
"(",
"crl_file",
")",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'{0}/{1}/{2}.crt'",
".",
"format",
"(",
"cert_base_path",
"(",
")",
",",
"ca_name",
",",
"ca_filename",
")",
")",
"as",
"fp_",
":",
"ca_cert",
"=",
"OpenSSL",
".",
"crypto",
".",
"load_certificate",
"(",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"fp_",
".",
"read",
"(",
")",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"'{0}/{1}/{2}.key'",
".",
"format",
"(",
"cert_base_path",
"(",
")",
",",
"ca_name",
",",
"ca_filename",
")",
")",
"as",
"fp_",
":",
"ca_key",
"=",
"OpenSSL",
".",
"crypto",
".",
"load_privatekey",
"(",
"OpenSSL",
".",
"crypto",
".",
"FILETYPE_PEM",
",",
"fp_",
".",
"read",
"(",
")",
")",
"except",
"IOError",
":",
"return",
"'There is no CA named \"{0}\"'",
".",
"format",
"(",
"ca_name",
")",
"crl",
"=",
"OpenSSL",
".",
"crypto",
".",
"CRL",
"(",
")",
"crl_text",
"=",
"crl",
".",
"export",
"(",
"ca_cert",
",",
"ca_key",
",",
"digest",
"=",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_bytes",
"(",
"digest",
")",
",",
")",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"crl_file",
",",
"'w'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"crl_text",
")",
")",
"return",
"'Created an empty CRL: \"{0}\"'",
".",
"format",
"(",
"crl_file",
")"
] | python | Create an empty Certificate Revocation List.
.. versionadded:: 2015.8.0
ca_name
name of the CA
cacert_path
absolute path to ca certificates root directory
ca_filename
alternative filename for the CA
.. versionadded:: 2015.5.3
crl_file
full path to the CRL file
digest
The message digest algorithm. Must be a string describing a digest
algorithm supported by OpenSSL (by EVP_get_digestbyname, specifically).
For example, "md5" or "sha1". Default: 'sha256'
CLI Example:
.. code-block:: bash
salt '*' tls.create_empty_crl ca_name='koji' \
ca_filename='ca' \
crl_file='/etc/openvpn/team1/crl.pem' | false |
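A hedged sketch of the pyOpenSSL core that create_empty_crl wraps, assuming the CA certificate and key already exist on disk (the paths below are illustrative, not from the record):

import OpenSSL

with open('/etc/pki/koji/ca.crt') as fp_:
    ca_cert = OpenSSL.crypto.load_certificate(
        OpenSSL.crypto.FILETYPE_PEM, fp_.read())
with open('/etc/pki/koji/ca.key') as fp_:
    ca_key = OpenSSL.crypto.load_privatekey(
        OpenSSL.crypto.FILETYPE_PEM, fp_.read())

crl = OpenSSL.crypto.CRL()
pem = crl.export(ca_cert, ca_key, digest=b'sha256')  # empty, PEM-encoded CRL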
2,403,720 | def add_native_name(self, name):
"""Add native name.
Args:
:param name: native name for the current author.
:type name: string
"""
self._ensure_field('name', {})
self.obj['name'].setdefault('native_names', []).append(name) | [
"def",
"add_native_name",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"_ensure_field",
"(",
"'name'",
",",
"{",
"}",
")",
"self",
".",
"obj",
"[",
"'name'",
"]",
".",
"setdefault",
"(",
"'native_names'",
",",
"[",
"]",
")",
".",
"append",
"(",
"name",
")"
] | python | Add native name.
Args:
:param name: native name for the current author.
:type name: string | false |
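The method builds its nested structure with dict.setdefault; a self-contained approximation of that pattern (the builder object and _ensure_field are omitted, and the names are made up):

obj = {}
obj.setdefault('name', {}).setdefault('native_names', []).append('Jöran')
obj['name'].setdefault('native_names', []).append('Yöran')
print(obj)  # {'name': {'native_names': ['Jöran', 'Yöran']}}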
2,678,792 | def mixin_class(target, cls):
"""Mix cls content in target."""
for name, field in getmembers(cls):
Mixin.mixin(target, field, name) | [
"def",
"mixin_class",
"(",
"target",
",",
"cls",
")",
":",
"for",
"name",
",",
"field",
"in",
"getmembers",
"(",
"cls",
")",
":",
"Mixin",
".",
"mixin",
"(",
"target",
",",
"field",
",",
"name",
")"
] | python | Mix cls content in target. | false |
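A self-contained approximation of what mixin_class does, with setattr standing in for Mixin.mixin (whose exact semantics are not shown in the record):

from inspect import getmembers

class Greeter:
    def greet(self):
        return 'hello'

class Target:
    pass

# copy every non-dunder member of Greeter onto Target
for name, field in getmembers(Greeter):
    if not name.startswith('__'):
        setattr(Target, name, field)

print(Target().greet())  # 'hello'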
2,083,001 | def get_descriptor_defaults(self, api_info, hostname=None, x_google_api_name=False):
"""Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration.
"""
hostname = (hostname or util.get_app_hostname() or
api_info.hostname)
protocol = 'http' if ((hostname and hostname.startswith('localhost')) or
util.is_running_on_devserver()) else 'https'
base_path = api_info.base_path
if base_path != '/':
base_path = base_path.rstrip('/')
defaults = {
'swagger': '2.0',
'info': {
'version': api_info.api_version,
'title': api_info.name
},
'host': hostname,
'consumes': ['application/json'],
'produces': ['application/json'],
'schemes': [protocol],
'basePath': base_path,
}
if x_google_api_name:
defaults['x-google-api-name'] = _validate_api_name(api_info.name)
return defaults | [
"def",
"get_descriptor_defaults",
"(",
"self",
",",
"api_info",
",",
"hostname",
"=",
"None",
",",
"x_google_api_name",
"=",
"False",
")",
":",
"hostname",
"=",
"(",
"hostname",
"or",
"util",
".",
"get_app_hostname",
"(",
")",
"or",
"api_info",
".",
"hostname",
")",
"protocol",
"=",
"'http'",
"if",
"(",
"(",
"hostname",
"and",
"hostname",
".",
"startswith",
"(",
"'localhost'",
")",
")",
"or",
"util",
".",
"is_running_on_devserver",
"(",
")",
")",
"else",
"'https'",
"base_path",
"=",
"api_info",
".",
"base_path",
"if",
"base_path",
"!=",
"'/'",
":",
"base_path",
"=",
"base_path",
".",
"rstrip",
"(",
"'/'",
")",
"defaults",
"=",
"{",
"'swagger'",
":",
"'2.0'",
",",
"'info'",
":",
"{",
"'version'",
":",
"api_info",
".",
"api_version",
",",
"'title'",
":",
"api_info",
".",
"name",
"}",
",",
"'host'",
":",
"hostname",
",",
"'consumes'",
":",
"[",
"'application/json'",
"]",
",",
"'produces'",
":",
"[",
"'application/json'",
"]",
",",
"'schemes'",
":",
"[",
"protocol",
"]",
",",
"'basePath'",
":",
"base_path",
",",
"}",
"if",
"x_google_api_name",
":",
"defaults",
"[",
"'x-google-api-name'",
"]",
"=",
"_validate_api_name",
"(",
"api_info",
".",
"name",
")",
"return",
"defaults"
] | python | Gets a default configuration for a service.
Args:
api_info: _ApiInfo object for this service.
hostname: string, Hostname of the API, to override the value set on the
current service. Defaults to None.
Returns:
A dictionary with the default configuration. | false |
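The shape of the dictionary this returns, with illustrative values (hostname, version, and base path are assumptions, not from the record):

defaults = {
    'swagger': '2.0',
    'info': {'version': 'v1', 'title': 'my-api'},
    'host': 'my-app.appspot.com',
    'consumes': ['application/json'],
    'produces': ['application/json'],
    'schemes': ['https'],
    'basePath': '/_ah/api',
    # plus 'x-google-api-name' when x_google_api_name is truthy
}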
1,572,119 | def infer_dtype_from_scalar(val, pandas_dtype=False):
"""
interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is inferred as
object
"""
dtype = np.object_
# a 1-element ndarray
if isinstance(val, np.ndarray):
msg = "invalid ndarray passed to infer_dtype_from_scalar"
if val.ndim != 0:
raise ValueError(msg)
dtype = val.dtype
val = val.item()
elif isinstance(val, str):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)):
val = tslibs.Timestamp(val)
if val is tslibs.NaT or val.tz is None:
dtype = np.dtype('M8[ns]')
else:
if pandas_dtype:
dtype = DatetimeTZDtype(unit='ns', tz=val.tz)
else:
# return datetimetz as object
return np.object_, val
val = val.value
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslibs.Timedelta(val).value
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
elif is_integer(val):
if isinstance(val, np.integer):
dtype = type(val)
else:
dtype = np.int64
elif is_float(val):
if isinstance(val, np.floating):
dtype = type(val)
else:
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
elif pandas_dtype:
if lib.is_period(val):
dtype = PeriodDtype(freq=val.freq)
val = val.ordinal
return dtype, val | [
"def",
"infer_dtype_from_scalar",
"(",
"val",
",",
"pandas_dtype",
"=",
"False",
")",
":",
"dtype",
"=",
"np",
".",
"object_",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"ndarray",
")",
":",
"msg",
"=",
"\"invalid ndarray passed to infer_dtype_from_scalar\"",
"if",
"val",
".",
"ndim",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"dtype",
"=",
"val",
".",
"dtype",
"val",
"=",
"val",
".",
"item",
"(",
")",
"elif",
"isinstance",
"(",
"val",
",",
"str",
")",
":",
"dtype",
"=",
"np",
".",
"object_",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"np",
".",
"datetime64",
",",
"datetime",
")",
")",
":",
"val",
"=",
"tslibs",
".",
"Timestamp",
"(",
"val",
")",
"if",
"val",
"is",
"tslibs",
".",
"NaT",
"or",
"val",
".",
"tz",
"is",
"None",
":",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"'M8[ns]'",
")",
"else",
":",
"if",
"pandas_dtype",
":",
"dtype",
"=",
"DatetimeTZDtype",
"(",
"unit",
"=",
"'ns'",
",",
"tz",
"=",
"val",
".",
"tz",
")",
"else",
":",
"return",
"np",
".",
"object_",
",",
"val",
"val",
"=",
"val",
".",
"value",
"elif",
"isinstance",
"(",
"val",
",",
"(",
"np",
".",
"timedelta64",
",",
"timedelta",
")",
")",
":",
"val",
"=",
"tslibs",
".",
"Timedelta",
"(",
"val",
")",
".",
"value",
"dtype",
"=",
"np",
".",
"dtype",
"(",
"'m8[ns]'",
")",
"elif",
"is_bool",
"(",
"val",
")",
":",
"dtype",
"=",
"np",
".",
"bool_",
"elif",
"is_integer",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"integer",
")",
":",
"dtype",
"=",
"type",
"(",
"val",
")",
"else",
":",
"dtype",
"=",
"np",
".",
"int64",
"elif",
"is_float",
"(",
"val",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"np",
".",
"floating",
")",
":",
"dtype",
"=",
"type",
"(",
"val",
")",
"else",
":",
"dtype",
"=",
"np",
".",
"float64",
"elif",
"is_complex",
"(",
"val",
")",
":",
"dtype",
"=",
"np",
".",
"complex_",
"elif",
"pandas_dtype",
":",
"if",
"lib",
".",
"is_period",
"(",
"val",
")",
":",
"dtype",
"=",
"PeriodDtype",
"(",
"freq",
"=",
"val",
".",
"freq",
")",
"val",
"=",
"val",
".",
"ordinal",
"return",
"dtype",
",",
"val"
] | python | interpret the dtype from a scalar
Parameters
----------
pandas_dtype : bool, default False
whether to infer dtype including pandas extension types.
If False, a scalar belonging to a pandas extension type is inferred as
object | false |
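A hedged usage sketch (this is a pandas-internal helper; the import path below matches where it lived around pandas 0.24 and may differ in other versions):

import pandas as pd
from pandas.core.dtypes.cast import infer_dtype_from_scalar

infer_dtype_from_scalar(3)     # (numpy.int64, 3)
infer_dtype_from_scalar(3.5)   # (numpy.float64, 3.5)
ts = pd.Timestamp('2019-01-01', tz='UTC')
infer_dtype_from_scalar(ts)                     # (numpy.object_, ts)
infer_dtype_from_scalar(ts, pandas_dtype=True)  # (DatetimeTZDtype, ts.value)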
1,680,824 | def parse_auth(cls, entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in six.iteritems(entries):
if not isinstance(entry, dict):
log.debug(
'Config entry for key {0} is not auth config'.format(
registry
)
)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
'Found an IdentityToken entry for registry {0}'.format(
registry
)
)
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {0} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={0}, username={1})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf | [
"def",
"parse_auth",
"(",
"cls",
",",
"entries",
",",
"raise_on_error",
"=",
"False",
")",
":",
"conf",
"=",
"{",
"}",
"for",
"registry",
",",
"entry",
"in",
"six",
".",
"iteritems",
"(",
"entries",
")",
":",
"if",
"not",
"isinstance",
"(",
"entry",
",",
"dict",
")",
":",
"log",
".",
"debug",
"(",
"'Config entry for key {0} is not auth config'",
".",
"format",
"(",
"registry",
")",
")",
"if",
"raise_on_error",
":",
"raise",
"errors",
".",
"InvalidConfigFile",
"(",
"'Invalid configuration for registry {0}'",
".",
"format",
"(",
"registry",
")",
")",
"return",
"{",
"}",
"if",
"'identitytoken'",
"in",
"entry",
":",
"log",
".",
"debug",
"(",
"'Found an IdentityToken entry for registry {0}'",
".",
"format",
"(",
"registry",
")",
")",
"conf",
"[",
"registry",
"]",
"=",
"{",
"'IdentityToken'",
":",
"entry",
"[",
"'identitytoken'",
"]",
"}",
"continue",
"if",
"'auth'",
"not",
"in",
"entry",
":",
"log",
".",
"debug",
"(",
"'Auth data for {0} is absent. Client might be using a '",
"'credentials store instead.'",
".",
"format",
"(",
"registry",
")",
")",
"conf",
"[",
"registry",
"]",
"=",
"{",
"}",
"continue",
"username",
",",
"password",
"=",
"decode_auth",
"(",
"entry",
"[",
"'auth'",
"]",
")",
"log",
".",
"debug",
"(",
"'Found entry (registry={0}, username={1})'",
".",
"format",
"(",
"repr",
"(",
"registry",
")",
",",
"repr",
"(",
"username",
")",
")",
")",
"conf",
"[",
"registry",
"]",
"=",
"{",
"'username'",
":",
"username",
",",
"'password'",
":",
"password",
",",
"'email'",
":",
"entry",
".",
"get",
"(",
"'email'",
")",
",",
"'serveraddress'",
":",
"registry",
",",
"}",
"return",
"conf"
] | python | Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry. | false |
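A hedged sketch of the input parse_auth expects — the 'auths' mapping from a Docker config.json, where each 'auth' value is base64 of 'username:password'; the credentials below are made up:

import base64

entries = {
    'https://index.docker.io/v1/': {
        'auth': base64.b64encode(b'alice:s3cret').decode('ascii'),
        'email': 'alice@example.com',
    },
    'registry.example.com': {'identitytoken': 'abc123'},
}
# parse_auth(entries) -> username/password decoded for the first registry,
# and {'IdentityToken': 'abc123'} for the second.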
1,760,682 | def connect(cls, resource):
"""Returns a connection object pointing to the endpoint
associated to the received resource.
"""
from azure import storage as azure_storage
file_info = cls.parse_remote(resource)
return azure_storage.BlobService(file_info.storage) | [
"def",
"connect",
"(",
"cls",
",",
"resource",
")",
":",
"from",
"azure",
"import",
"storage",
"as",
"azure_storage",
"file_info",
"=",
"cls",
".",
"parse_remote",
"(",
"resource",
")",
"return",
"azure_storage",
".",
"BlobService",
"(",
"file_info",
".",
"storage",
")"
] | python | Returns a connection object pointing to the endpoint
associated to the received resource. | false |
2,180,186 | def neighborhood(self, elements: List[ELEMENT_NAME]) \
-> References:
""" Return a list of all slots, classes and types that touch any element in elements, including the element
itself
@param elements: Elements to do proximity with
@return: All slots and classes that touch element
"""
touches = References()
for element in elements:
if element in self.schema.classes:
touches.classrefs.add(element)
if None in touches.classrefs:
raise ValueError("1")
cls = self.schema.classes[element]
if cls.is_a:
touches.classrefs.add(cls.is_a)
if None in touches.classrefs:
raise ValueError("1")
# Mixins include apply_to's
touches.classrefs.update(set(cls.mixins))
for slotname in cls.slots:
slot = self.schema.slots[slotname]
if slot.range in self.schema.classes:
touches.classrefs.add(slot.range)
elif slot.range in self.schema.types:
touches.typerefs.add(slot.range)
if None in touches.classrefs:
raise ValueError("1")
if element in self.synopsis.rangerefs:
for slotname in self.synopsis.rangerefs[element]:
touches.slotrefs.add(slotname)
if self.schema.slots[slotname].domain:
touches.classrefs.add(self.schema.slots[slotname].domain)
elif element in self.schema.slots:
touches.slotrefs.add(element)
slot = self.schema.slots[element]
touches.slotrefs.update(set(slot.mixins))
if slot.is_a:
touches.slotrefs.add(slot.is_a)
if element in self.synopsis.inverses:
touches.slotrefs.update(self.synopsis.inverses[element])
if slot.domain:
touches.classrefs.add(slot.domain)
if slot.range in self.schema.classes:
touches.classrefs.add(slot.range)
elif slot.range in self.schema.types:
touches.typerefs.add(slot.range)
elif element in self.schema.types:
if element in self.synopsis.rangerefs:
touches.slotrefs.update(self.synopsis.rangerefs[element])
return touches | [
"def",
"neighborhood",
"(",
"self",
",",
"elements",
":",
"List",
"[",
"ELEMENT_NAME",
"]",
")",
"->",
"References",
":",
"touches",
"=",
"References",
"(",
")",
"for",
"element",
"in",
"elements",
":",
"if",
"element",
"in",
"self",
".",
"schema",
".",
"classes",
":",
"touches",
".",
"classrefs",
".",
"add",
"(",
"element",
")",
"if",
"None",
"in",
"touches",
".",
"classrefs",
":",
"raise",
"ValueError",
"(",
"\"1\"",
")",
"cls",
"=",
"self",
".",
"schema",
".",
"classes",
"[",
"element",
"]",
"if",
"cls",
".",
"is_a",
":",
"touches",
".",
"classrefs",
".",
"add",
"(",
"cls",
".",
"is_a",
")",
"if",
"None",
"in",
"touches",
".",
"classrefs",
":",
"raise",
"ValueError",
"(",
"\"1\"",
")",
"touches",
".",
"classrefs",
".",
"update",
"(",
"set",
"(",
"cls",
".",
"mixins",
")",
")",
"for",
"slotname",
"in",
"cls",
".",
"slots",
":",
"slot",
"=",
"self",
".",
"schema",
".",
"slots",
"[",
"slotname",
"]",
"if",
"slot",
".",
"range",
"in",
"self",
".",
"schema",
".",
"classes",
":",
"touches",
".",
"classrefs",
".",
"add",
"(",
"slot",
".",
"range",
")",
"elif",
"slot",
".",
"range",
"in",
"self",
".",
"schema",
".",
"types",
":",
"touches",
".",
"typerefs",
".",
"add",
"(",
"slot",
".",
"range",
")",
"if",
"None",
"in",
"touches",
".",
"classrefs",
":",
"raise",
"ValueError",
"(",
"\"1\"",
")",
"if",
"element",
"in",
"self",
".",
"synopsis",
".",
"rangerefs",
":",
"for",
"slotname",
"in",
"self",
".",
"synopsis",
".",
"rangerefs",
"[",
"element",
"]",
":",
"touches",
".",
"slotrefs",
".",
"add",
"(",
"slotname",
")",
"if",
"self",
".",
"schema",
".",
"slots",
"[",
"slotname",
"]",
".",
"domain",
":",
"touches",
".",
"classrefs",
".",
"add",
"(",
"self",
".",
"schema",
".",
"slots",
"[",
"slotname",
"]",
".",
"domain",
")",
"elif",
"element",
"in",
"self",
".",
"schema",
".",
"slots",
":",
"touches",
".",
"slotrefs",
".",
"add",
"(",
"element",
")",
"slot",
"=",
"self",
".",
"schema",
".",
"slots",
"[",
"element",
"]",
"touches",
".",
"slotrefs",
".",
"update",
"(",
"set",
"(",
"slot",
".",
"mixins",
")",
")",
"if",
"slot",
".",
"is_a",
":",
"touches",
".",
"slotrefs",
".",
"add",
"(",
"slot",
".",
"is_a",
")",
"if",
"element",
"in",
"self",
".",
"synopsis",
".",
"inverses",
":",
"touches",
".",
"slotrefs",
".",
"update",
"(",
"self",
".",
"synopsis",
".",
"inverses",
"[",
"element",
"]",
")",
"if",
"slot",
".",
"domain",
":",
"touches",
".",
"classrefs",
".",
"add",
"(",
"slot",
".",
"domain",
")",
"if",
"slot",
".",
"range",
"in",
"self",
".",
"schema",
".",
"classes",
":",
"touches",
".",
"classrefs",
".",
"add",
"(",
"slot",
".",
"range",
")",
"elif",
"slot",
".",
"range",
"in",
"self",
".",
"schema",
".",
"types",
":",
"touches",
".",
"typerefs",
".",
"add",
"(",
"slot",
".",
"range",
")",
"elif",
"element",
"in",
"self",
".",
"schema",
".",
"types",
":",
"if",
"element",
"in",
"self",
".",
"synopsis",
".",
"rangerefs",
":",
"touches",
".",
"slotrefs",
".",
"update",
"(",
"self",
".",
"synopsis",
".",
"rangerefs",
"[",
"element",
"]",
")",
"return",
"touches"
] | python | Return a list of all slots, classes and types that touch any element in elements, including the element
itself.
@param elements: Elements to compute proximity for
@return: All slots, classes and types that touch the elements | false |
1,984,481 | def directp(node0, node1, node2, node3, hypocenter, reference, pp):
"""
Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
To define the intersection point(Pd) of PpPh line segment and fault plane,
we obtain the intersection points(Pd) with each side of fault plan, and
check which intersection point(Pd) is the one fitting the definition in
the Chiou and Spudich(2014) directivity model.
Two possible locations for Pd, the first case, Pd locates on the side of
the fault patch when Pp is not inside the fault patch. The second case is
when Pp is inside the fault patch, then Pd=Pp.
For the first case, it follows three conditions:
1. the PpPh and PdPh line vector are the same,
2. PpPh >= PdPh,
3. Pd is not inside the fault patch.
If we can not find solution for all the four possible intersection points
for the first case, we check if the intersection point fit the second case
by checking if Pp is inside the fault patch.
Because of the coordinate system mapping(from geographic system to
Catestian system), we allow an error when we check the location. The allow
error will keep increasing after each loop when no solution in the two
cases are found, until the solution get obtained.
:param node0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment.
:param node1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment. Note, the order should be clockwise.
:param node2:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment. Note, the order should be clockwise.
:param node3:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one vertices on the target fault
segment. Note, the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of floating hypocenter on each segment
calculation. In the method, we take the direction point of the
previous fault patch as hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
Pd, a numpy array, representing the location of the direction point.
E, the distance from the direction point to the hypocentre.
go_next_patch, a flag indicating whether the calculation continues on
the next fault patch. 1: yes, 0: no.
"""
# Find the intersection point Pd by checking that PdPh shares the
# same vector with PpPh, and that PpPh >= PdPh
# Transform to xyz coordinate
node0_xyz = get_xyz_from_ll(node0, reference)
node1_xyz = get_xyz_from_ll(node1, reference)
node2_xyz = get_xyz_from_ll(node2, reference)
node3_xyz = get_xyz_from_ll(node3, reference)
hypocenter_xyz = get_xyz_from_ll(hypocenter, reference)
hypocenter_xyz = np.array(hypocenter_xyz).flatten()
pp_xyz = pp
e = []
# Loop each segments on the patch to find Pd
segment_s = [node0_xyz, node1_xyz, node2_xyz, node3_xyz]
segment_e = [node1_xyz, node2_xyz, node3_xyz, node0_xyz]
# set the buffering parameter buf
buf = 0.0001
atol = 0.0001
loop = True
exit_flag = False
looptime = 0.
while loop:
x_min = np.min(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) - buf
x_max = np.max(np.array([node0_xyz[0], node1_xyz[0], node2_xyz[0],
node3_xyz[0]])) + buf
y_min = np.min(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) - buf
y_max = np.max(np.array([node0_xyz[1], node1_xyz[1], node2_xyz[1],
node3_xyz[1]])) + buf
n_seg = 0
exit_flag = False
for (seg_s, seg_e) in zip(segment_s, segment_e):
seg_s = np.array(seg_s).flatten()
seg_e = np.array(seg_e).flatten()
p_intersect, vector1, vector2, vector3, vector4 = _intersection(
seg_s, seg_e, pp_xyz, hypocenter_xyz)
ppph = dst.pdist([pp, hypocenter_xyz])
pdph = dst.pdist([p_intersect.flatten(), hypocenter_xyz])
n_seg = n_seg + 1
# Check that the directions of the hyp-pp and hyp-pd vectors
# are the same.
if (np.allclose(vector1.flatten(), vector2,
atol=atol, rtol=0.)):
if ((np.allclose(vector3.flatten(), vector4, atol=atol,
rtol=0.))):
# Check if ppph >= pdph.
if (ppph >= pdph):
if (p_intersect[0] >= x_min) & (p_intersect[0] <=
x_max):
if (p_intersect[1] >= y_min) & (p_intersect[1]
<= y_max):
e = pdph
pd = p_intersect
exit_flag = True
break
# when pp is located within the fault rupture plane, e = ppph
if not e:
if (pp_xyz[0] >= x_min) & (pp_xyz[0] <= x_max):
if (pp_xyz[1] >= y_min) & (pp_xyz[1] <= y_max):
pd = pp_xyz
e = ppph
exit_flag = True
if exit_flag:
break
if not e:
looptime += 1
atol = 0.0001 * looptime
buf = 0.0001 * looptime
# if pd is located at 2nd fault segment, then the DPP calculation will
# keep going on the next fault patch
if n_seg == 2:
go_next_patch = True
else:
go_next_patch = False
return pd, e, go_next_patch | [
"def",
"directp",
"(",
"node0",
",",
"node1",
",",
"node2",
",",
"node3",
",",
"hypocenter",
",",
"reference",
",",
"pp",
")",
":",
"node0_xyz",
"=",
"get_xyz_from_ll",
"(",
"node0",
",",
"reference",
")",
"node1_xyz",
"=",
"get_xyz_from_ll",
"(",
"node1",
",",
"reference",
")",
"node2_xyz",
"=",
"get_xyz_from_ll",
"(",
"node2",
",",
"reference",
")",
"node3_xyz",
"=",
"get_xyz_from_ll",
"(",
"node3",
",",
"reference",
")",
"hypocenter_xyz",
"=",
"get_xyz_from_ll",
"(",
"hypocenter",
",",
"reference",
")",
"hypocenter_xyz",
"=",
"np",
".",
"array",
"(",
"hypocenter_xyz",
")",
".",
"flatten",
"(",
")",
"pp_xyz",
"=",
"pp",
"e",
"=",
"[",
"]",
"segment_s",
"=",
"[",
"node0_xyz",
",",
"node1_xyz",
",",
"node2_xyz",
",",
"node3_xyz",
"]",
"segment_e",
"=",
"[",
"node1_xyz",
",",
"node2_xyz",
",",
"node3_xyz",
",",
"node0_xyz",
"]",
"buf",
"=",
"0.0001",
"atol",
"=",
"0.0001",
"loop",
"=",
"True",
"exit_flag",
"=",
"False",
"looptime",
"=",
"0.",
"while",
"loop",
":",
"x_min",
"=",
"np",
".",
"min",
"(",
"np",
".",
"array",
"(",
"[",
"node0_xyz",
"[",
"0",
"]",
",",
"node1_xyz",
"[",
"0",
"]",
",",
"node2_xyz",
"[",
"0",
"]",
",",
"node3_xyz",
"[",
"0",
"]",
"]",
")",
")",
"-",
"buf",
"x_max",
"=",
"np",
".",
"max",
"(",
"np",
".",
"array",
"(",
"[",
"node0_xyz",
"[",
"0",
"]",
",",
"node1_xyz",
"[",
"0",
"]",
",",
"node2_xyz",
"[",
"0",
"]",
",",
"node3_xyz",
"[",
"0",
"]",
"]",
")",
")",
"+",
"buf",
"y_min",
"=",
"np",
".",
"min",
"(",
"np",
".",
"array",
"(",
"[",
"node0_xyz",
"[",
"1",
"]",
",",
"node1_xyz",
"[",
"1",
"]",
",",
"node2_xyz",
"[",
"1",
"]",
",",
"node3_xyz",
"[",
"1",
"]",
"]",
")",
")",
"-",
"buf",
"y_max",
"=",
"np",
".",
"max",
"(",
"np",
".",
"array",
"(",
"[",
"node0_xyz",
"[",
"1",
"]",
",",
"node1_xyz",
"[",
"1",
"]",
",",
"node2_xyz",
"[",
"1",
"]",
",",
"node3_xyz",
"[",
"1",
"]",
"]",
")",
")",
"+",
"buf",
"n_seg",
"=",
"0",
"exit_flag",
"=",
"False",
"for",
"(",
"seg_s",
",",
"seg_e",
")",
"in",
"zip",
"(",
"segment_s",
",",
"segment_e",
")",
":",
"seg_s",
"=",
"np",
".",
"array",
"(",
"seg_s",
")",
".",
"flatten",
"(",
")",
"seg_e",
"=",
"np",
".",
"array",
"(",
"seg_e",
")",
".",
"flatten",
"(",
")",
"p_intersect",
",",
"vector1",
",",
"vector2",
",",
"vector3",
",",
"vector4",
"=",
"_intersection",
"(",
"seg_s",
",",
"seg_e",
",",
"pp_xyz",
",",
"hypocenter_xyz",
")",
"ppph",
"=",
"dst",
".",
"pdist",
"(",
"[",
"pp",
",",
"hypocenter_xyz",
"]",
")",
"pdph",
"=",
"dst",
".",
"pdist",
"(",
"[",
"p_intersect",
".",
"flatten",
"(",
")",
",",
"hypocenter_xyz",
"]",
")",
"n_seg",
"=",
"n_seg",
"+",
"1",
"if",
"(",
"np",
".",
"allclose",
"(",
"vector1",
".",
"flatten",
"(",
")",
",",
"vector2",
",",
"atol",
"=",
"atol",
",",
"rtol",
"=",
"0.",
")",
")",
":",
"if",
"(",
"(",
"np",
".",
"allclose",
"(",
"vector3",
".",
"flatten",
"(",
")",
",",
"vector4",
",",
"atol",
"=",
"atol",
",",
"rtol",
"=",
"0.",
")",
")",
")",
":",
"if",
"(",
"ppph",
">=",
"pdph",
")",
":",
"if",
"(",
"p_intersect",
"[",
"0",
"]",
">=",
"x_min",
")",
"&",
"(",
"p_intersect",
"[",
"0",
"]",
"<=",
"x_max",
")",
":",
"if",
"(",
"p_intersect",
"[",
"1",
"]",
">=",
"y_min",
")",
"&",
"(",
"p_intersect",
"[",
"1",
"]",
"<=",
"y_max",
")",
":",
"e",
"=",
"pdph",
"pd",
"=",
"p_intersect",
"exit_flag",
"=",
"True",
"break",
"if",
"not",
"e",
":",
"if",
"(",
"pp_xyz",
"[",
"0",
"]",
">=",
"x_min",
")",
"&",
"(",
"pp_xyz",
"[",
"0",
"]",
"<=",
"x_max",
")",
":",
"if",
"(",
"pp_xyz",
"[",
"1",
"]",
">=",
"y_min",
")",
"&",
"(",
"pp_xyz",
"[",
"1",
"]",
"<=",
"y_max",
")",
":",
"pd",
"=",
"pp_xyz",
"e",
"=",
"ppph",
"exit_flag",
"=",
"True",
"if",
"exit_flag",
":",
"break",
"if",
"not",
"e",
":",
"looptime",
"+=",
"1",
"atol",
"=",
"0.0001",
"*",
"looptime",
"buf",
"=",
"0.0001",
"*",
"looptime",
"if",
"n_seg",
"==",
"2",
":",
"go_next_patch",
"=",
"True",
"else",
":",
"go_next_patch",
"=",
"False",
"return",
"pd",
",",
"e",
",",
"go_next_patch"
] | python | Get the Direct Point and the corresponding E-path as described in
Spudich et al. (2013). This method also provides a logical variable
stating if the DPP calculation must consider the neighbouring patch.
To define the intersection point (Pd) of the PpPh line segment and the
fault plane, we obtain the intersection points (Pd) with each side of the
fault plane, and check which intersection point (Pd) fits the definition
in the Chiou and Spudich (2014) directivity model.
There are two possible locations for Pd. In the first case, Pd lies on a
side of the fault patch when Pp is not inside the fault patch. In the
second case, Pp is inside the fault patch, and then Pd = Pp.
The first case must satisfy three conditions:
1. the PpPh and PdPh line vectors are the same,
2. PpPh >= PdPh,
3. Pd is not inside the fault patch.
If no solution is found among the four possible intersection points for
the first case, we check whether the intersection point fits the second
case by checking whether Pp is inside the fault patch.
Because of the coordinate-system mapping (from the geographic system to a
Cartesian system), we allow an error when we check the location. The
allowed error keeps increasing after each loop in which no solution is
found for either case, until a solution is obtained.
:param node0:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one of the vertices of the target fault
segment.
:param node1:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one of the vertices of the target fault
segment. Note, the order should be clockwise.
:param node2:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one of the vertices of the target fault
segment. Note, the order should be clockwise.
:param node3:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of one of the vertices of the target fault
segment. Note, the order should be clockwise.
:param hypocenter:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of floating hypocenter on each segment
calculation. In the method, we take the direction point of the
previous fault patch as hypocentre for the current fault patch.
:param reference:
:class:`~openquake.hazardlib.geo.point.Point` object
representing the location of reference point for projection
:param pp:
the projection of the site onto the plane containing the fault
slipped area. A numpy array.
:returns:
Pd, a numpy array, representing the location of the direction point.
E, the distance from the direction point to the hypocentre.
go_next_patch, a flag indicating whether the calculation continues on
the next fault patch. 1: yes, 0: no. | false |
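The widening-tolerance retry at the heart of directp, isolated into a runnable sketch (the predicate is illustrative; in the record it stands for the vector-direction and point-in-patch tests):

def find_with_growing_tolerance(check, step=1e-4, max_loops=100):
    for loop in range(1, max_loops + 1):
        tol = step * loop
        if check(tol):  # retry the geometric test with a looser tolerance
            return tol
    return None

print(find_with_growing_tolerance(lambda tol: tol > 5e-4))  # ~0.0006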
2,436,790 | def rebuild( self ):
"""
Rebuilds the grid lines based on the current settings and \
scene width. This method is triggered automatically, and \
shouldn't need to be manually called.
"""
rect = self.sceneRect()
x = rect.left()
y = rect.top()
w = rect.width()
h = rect.height()
# calculate background gridlines
cx = x + (w / 2)
cy = y + (h / 2)
self._centerLines = [QLine(cx, rect.top(), cx, rect.bottom()),
QLine(rect.left(), cy, rect.right(), cy) ]
# create the horizontal grid lines
delta = self.cellHeight()
minor_lines = []
major_lines = []
count = 1
while delta < (h / 2):
pos_line = QLine(x, cy + delta, x + w, cy + delta)
neg_line = QLine(x, cy - delta, x + w, cy - delta)
# every 10th line will be a major line
if count == 10:
major_lines.append(pos_line)
major_lines.append(neg_line)
count = 1
else:
minor_lines.append(pos_line)
minor_lines.append(neg_line)
# update the current y location
delta += self.cellHeight()
count += 1
# create the vertical grid lines
delta = self.cellWidth()
count = 1
while delta < (w / 2):
pos_line = QLine(cx + delta, y, cx + delta, y + h)
neg_line = QLine(cx - delta, y, cx - delta, y + h)
# every 10th line will be a major line
if count == 10:
major_lines.append(pos_line)
major_lines.append(neg_line)
count = 1
else:
minor_lines.append(pos_line)
minor_lines.append(neg_line)
# update the current y location
delta += self.cellWidth()
count += 1
# set the line cache
self._majorLines = major_lines
self._minorLines = minor_lines
# unmark the scene as being dirty
self.setDirty(False) | [
"def",
"rebuild",
"(",
"self",
")",
":",
"rect",
"=",
"self",
".",
"sceneRect",
"(",
")",
"x",
"=",
"rect",
".",
"left",
"(",
")",
"y",
"=",
"rect",
".",
"top",
"(",
")",
"w",
"=",
"rect",
".",
"width",
"(",
")",
"h",
"=",
"rect",
".",
"height",
"(",
")",
"cx",
"=",
"x",
"+",
"(",
"w",
"/",
"2",
")",
"cy",
"=",
"y",
"+",
"(",
"h",
"/",
"2",
")",
"self",
".",
"_centerLines",
"=",
"[",
"QLine",
"(",
"cx",
",",
"rect",
".",
"top",
"(",
")",
",",
"cx",
",",
"rect",
".",
"bottom",
"(",
")",
")",
",",
"QLine",
"(",
"rect",
".",
"left",
"(",
")",
",",
"cy",
",",
"rect",
".",
"right",
"(",
")",
",",
"cy",
")",
"]",
"delta",
"=",
"self",
".",
"cellHeight",
"(",
")",
"minor_lines",
"=",
"[",
"]",
"major_lines",
"=",
"[",
"]",
"count",
"=",
"1",
"while",
"delta",
"<",
"(",
"h",
"/",
"2",
")",
":",
"pos_line",
"=",
"QLine",
"(",
"x",
",",
"cy",
"+",
"delta",
",",
"x",
"+",
"w",
",",
"cy",
"+",
"delta",
")",
"neg_line",
"=",
"QLine",
"(",
"x",
",",
"cy",
"-",
"delta",
",",
"x",
"+",
"w",
",",
"cy",
"-",
"delta",
")",
"if",
"count",
"==",
"10",
":",
"major_lines",
".",
"append",
"(",
"pos_line",
")",
"major_lines",
".",
"append",
"(",
"neg_line",
")",
"count",
"=",
"1",
"else",
":",
"minor_lines",
".",
"append",
"(",
"pos_line",
")",
"minor_lines",
".",
"append",
"(",
"neg_line",
")",
"delta",
"+=",
"self",
".",
"cellHeight",
"(",
")",
"count",
"+=",
"1",
"delta",
"=",
"self",
".",
"cellWidth",
"(",
")",
"count",
"=",
"1",
"while",
"delta",
"<",
"(",
"w",
"/",
"2",
")",
":",
"pos_line",
"=",
"QLine",
"(",
"cx",
"+",
"delta",
",",
"y",
",",
"cx",
"+",
"delta",
",",
"y",
"+",
"h",
")",
"neg_line",
"=",
"QLine",
"(",
"cx",
"-",
"delta",
",",
"y",
",",
"cx",
"-",
"delta",
",",
"y",
"+",
"h",
")",
"if",
"count",
"==",
"10",
":",
"major_lines",
".",
"append",
"(",
"pos_line",
")",
"major_lines",
".",
"append",
"(",
"neg_line",
")",
"count",
"=",
"1",
"else",
":",
"minor_lines",
".",
"append",
"(",
"pos_line",
")",
"minor_lines",
".",
"append",
"(",
"neg_line",
")",
"delta",
"+=",
"self",
".",
"cellWidth",
"(",
")",
"count",
"+=",
"1",
"self",
".",
"_majorLines",
"=",
"major_lines",
"self",
".",
"_minorLines",
"=",
"minor_lines",
"self",
".",
"setDirty",
"(",
"False",
")"
] | python | Rebuilds the grid lines based on the current settings and \
scene width. This method is triggered automatically, and \
shouldn't need to be manually called. | false |
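The every-tenth-line bookkeeping above, isolated from Qt as a standalone trace (not from the record); note that because count is reset to 1 and then incremented, the majors after the first land every ninth line:

major, minor = [], []
count = 1
for i in range(1, 31):
    if count == 10:
        major.append(i)
        count = 1
    else:
        minor.append(i)
    count += 1
print(major)  # [10, 19, 28]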
2,222,100 | def __init__(self, **kwargs):
"""
Keyword arguments
-------------------
basedir : str
Top level directory for finding files
"""
self._name_factory = NameFactory(**kwargs)
self._dmm = kwargs.get('DiffuseModelManager',
DiffuseModelManager(**kwargs))
self._gmm = kwargs.get('GalpropMapManager',
GalpropMapManager(**kwargs))
self._csm = kwargs.get('CatalogSourceManager',
CatalogSourceManager(**kwargs))
self._library = {}
self._models = {}
self._spec_lib = SpectralLibrary({}) | [
"def",
"__init__",
"(",
"self",
",",
"**",
"kwargs",
")",
":",
"self",
".",
"_name_factory",
"=",
"NameFactory",
"(",
"**",
"kwargs",
")",
"self",
".",
"_dmm",
"=",
"kwargs",
".",
"get",
"(",
"'DiffuseModelManager'",
",",
"DiffuseModelManager",
"(",
"**",
"kwargs",
")",
")",
"self",
".",
"_gmm",
"=",
"kwargs",
".",
"get",
"(",
"'GalpropMapManager'",
",",
"GalpropMapManager",
"(",
"**",
"kwargs",
")",
")",
"self",
".",
"_csm",
"=",
"kwargs",
".",
"get",
"(",
"'CatalogSourceManager'",
",",
"CatalogSourceManager",
"(",
"**",
"kwargs",
")",
")",
"self",
".",
"_library",
"=",
"{",
"}",
"self",
".",
"_models",
"=",
"{",
"}",
"self",
".",
"_spec_lib",
"=",
"SpectralLibrary",
"(",
"{",
"}",
")"
] | python | Keyword arguments
-------------------
basedir : str
Top level directory for finding files | false |
2,186,406 | def __call__(self, *args, **kwargs):
"""
Method call
"""
# Send a request
request = [self.__method_name]
if args:
request.extend(args)
# Keyword arguments are ignored
self.__publish(self.__uid, self, self.__topic, request)
# Wait for an answer
self._event.wait()
# Act accordingly
if self._error:
raise RemoteServiceError(self._error)
else:
return self._result | [
"def",
"__call__",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"request",
"=",
"[",
"self",
".",
"__method_name",
"]",
"if",
"args",
":",
"request",
".",
"extend",
"(",
"args",
")",
"self",
".",
"__publish",
"(",
"self",
".",
"__uid",
",",
"self",
",",
"self",
".",
"__topic",
",",
"request",
")",
"self",
".",
"_event",
".",
"wait",
"(",
")",
"if",
"self",
".",
"_error",
":",
"raise",
"RemoteServiceError",
"(",
"self",
".",
"_error",
")",
"else",
":",
"return",
"self",
".",
"_result"
] | python | Method call | false |
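The call/wait/raise-or-return shape of this proxy, reduced to a self-contained sketch with threading.Event standing in for the transport (names are illustrative):

import threading

class PendingCall:
    def __init__(self):
        self._event = threading.Event()
        self._result = None
        self._error = None

    def resolve(self, result=None, error=None):
        self._result, self._error = result, error
        self._event.set()  # wakes the blocked caller

    def wait(self):
        self._event.wait()
        if self._error:
            raise RuntimeError(self._error)
        return self._result

call = PendingCall()
call.resolve(result=42)
print(call.wait())  # 42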
2,396,280 | def _set_cspf_group_computation_mode(self, v, load=False):
"""
Setter method for cspf_group_computation_mode, mapped from YANG variable /mpls_state/policy/cspf_group_computation_mode (mpls-cspf-grp-comp-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_cspf_group_computation_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cspf_group_computation_mode() directly.
YANG Description: CSPF Group Computation Mode
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'mpls-cspf-grp-comp-mode-default': {'value': 0}, u'mpls-cspf-grp-comp-mode-exclude-groups': {'value': 2}, u'mpls-cspf-grp-comp-mode-max': {'value': 4}, u'mpls-cspf-grp-comp-mode-add-penalty': {'value': 1}, u'mpls-cspf-grp-comp-mode-high-cost': {'value': 3}},), is_leaf=True, yang_name="cspf-group-computation-mode", rest_name="cspf-group-computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-cspf-grp-comp-mode', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cspf_group_computation_mode must be of a type compatible with mpls-cspf-grp-comp-mode""",
'defined-type': "brocade-mpls-operational:mpls-cspf-grp-comp-mode",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'mpls-cspf-grp-comp-mode-default': {'value': 0}, u'mpls-cspf-grp-comp-mode-exclude-groups': {'value': 2}, u'mpls-cspf-grp-comp-mode-max': {'value': 4}, u'mpls-cspf-grp-comp-mode-add-penalty': {'value': 1}, u'mpls-cspf-grp-comp-mode-high-cost': {'value': 3}},), is_leaf=True, yang_name="cspf-group-computation-mode", rest_name="cspf-group-computation-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-cspf-grp-comp-mode', is_config=False)""",
})
self.__cspf_group_computation_mode = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_cspf_group_computation_mode",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"RestrictedClassType",
"(",
"base_type",
"=",
"unicode",
",",
"restriction_type",
"=",
"\"dict_key\"",
",",
"restriction_arg",
"=",
"{",
"u'mpls-cspf-grp-comp-mode-default'",
":",
"{",
"'value'",
":",
"0",
"}",
",",
"u'mpls-cspf-grp-comp-mode-exclude-groups'",
":",
"{",
"'value'",
":",
"2",
"}",
",",
"u'mpls-cspf-grp-comp-mode-max'",
":",
"{",
"'value'",
":",
"4",
"}",
",",
"u'mpls-cspf-grp-comp-mode-add-penalty'",
":",
"{",
"'value'",
":",
"1",
"}",
",",
"u'mpls-cspf-grp-comp-mode-high-cost'",
":",
"{",
"'value'",
":",
"3",
"}",
"}",
",",
")",
",",
"is_leaf",
"=",
"True",
",",
"yang_name",
"=",
"\"cspf-group-computation-mode\"",
",",
"rest_name",
"=",
"\"cspf-group-computation-mode\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-mpls-operational'",
",",
"defining_module",
"=",
"'brocade-mpls-operational'",
",",
"yang_type",
"=",
"'mpls-cspf-grp-comp-mode'",
",",
"is_config",
"=",
"False",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"cspf_group_computation_mode must be of a type compatible with mpls-cspf-grp-comp-mode\"\"\"",
",",
"'defined-type'",
":",
"\"brocade-mpls-operational:mpls-cspf-grp-comp-mode\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'mpls-cspf-grp-comp-mode-default': {'value': 0}, u'mpls-cspf-grp-comp-mode-exclude-groups': {'value': 2}, u'mpls-cspf-grp-comp-mode-max': {'value': 4}, u'mpls-cspf-grp-comp-mode-add-penalty': {'value': 1}, u'mpls-cspf-grp-comp-mode-high-cost': {'value': 3}},), is_leaf=True, yang_name=\"cspf-group-computation-mode\", rest_name=\"cspf-group-computation-mode\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-cspf-grp-comp-mode', is_config=False)\"\"\"",
",",
"}",
")",
"self",
".",
"__cspf_group_computation_mode",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | python | Setter method for cspf_group_computation_mode, mapped from YANG variable /mpls_state/policy/cspf_group_computation_mode (mpls-cspf-grp-comp-mode)
If this variable is read-only (config: false) in the
source YANG file, then _set_cspf_group_computation_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cspf_group_computation_mode() directly.
YANG Description: CSPF Group Computation Mode | false |
2,002,111 | def __init__(self, sd, id):
"""This constructor should not be called by the user program.
Call the SD.create() and SD.select() methods instead.
"""
# Args
# sd : SD instance
# id : SDS identifier
# Private attributes
# _sd SD instance
# _id SDS identifier
self._sd = sd
self._id = id | [
"def",
"__init__",
"(",
"self",
",",
"sd",
",",
"id",
")",
":",
"self",
".",
"_sd",
"=",
"sd",
"self",
".",
"_id",
"=",
"id"
] | python | This constructor should not be called by the user program.
Call the SD.create() and SD.select() methods instead. | false |
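How instances are meant to be obtained, per the docstring — via SD.select() or SD.create() rather than this constructor (pyhdf API; the file and dataset names below are illustrative):

from pyhdf.SD import SD, SDC

sd = SD('sample.hdf', SDC.READ)
sds = sd.select('Temperature')  # returns an SDS bound to this sd and id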