| id_within_dataset (int64, 46 to 2.71M) | snippet (stringlengths 63 to 481k) | tokens (sequencelengths 20 to 15.6k) | language (stringclasses, 2 values) | nl (stringlengths 1 to 32.4k) | is_duplicated (bool, 2 classes) |
---|---|---|---|---|---|
1,845,966 | def change_password(self, new_password, email):
"""
Changes the login password
:param new_password: The new login password to set for the account
:param email: The current email of the account
"""
log.info("[+] Changing the password of the account")
return self._send_xmpp_element(account.ChangePasswordRequest(self.password, new_password, email, self.username)) | [
"def",
"change_password",
"(",
"self",
",",
"new_password",
",",
"email",
")",
":",
"log",
".",
"info",
"(",
"\"[+] Changing the password of the account\"",
")",
"return",
"self",
".",
"_send_xmpp_element",
"(",
"account",
".",
"ChangePasswordRequest",
"(",
"self",
".",
"password",
",",
"new_password",
",",
"email",
",",
"self",
".",
"username",
")",
")"
] | python | Changes the login password
:param new_password: The new login password to set for the account
:param email: The current email of the account | false |
1,710,028 | def parse_timing(self):
"""
Parse the timer data in the main output file of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Return: :class:`AbinitTimerParser` instance, None if error.
"""
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(self.output_file.path)
if read_ok:
return parser
return None | [
"def",
"parse_timing",
"(",
"self",
")",
":",
"from",
".",
"abitimer",
"import",
"AbinitTimerParser",
"parser",
"=",
"AbinitTimerParser",
"(",
")",
"read_ok",
"=",
"parser",
".",
"parse",
"(",
"self",
".",
"output_file",
".",
"path",
")",
"if",
"read_ok",
":",
"return",
"parser",
"return",
"None"
] | python | Parse the timer data in the main output file of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Return: :class:`AbinitTimerParser` instance, None if error. | false |
1,944,300 | def structural_imbalance_ising(S):
"""Construct the Ising problem to calculate the structural imbalance of a signed social network.
A signed social network graph is a graph whose signed edges
represent friendly/hostile interactions between nodes. A
signed social network is considered balanced if it can be cleanly
divided into two factions, where all relations within a faction are
friendly, and all relations between factions are hostile. The measure
of imbalance or frustration is the minimum number of edges that
violate this rule.
Parameters
----------
S : NetworkX graph
A social graph on which each edge has a 'sign' attribute with a numeric value.
Returns
-------
h : dict
The linear biases of the Ising problem. Each variable in the Ising problem represents
a node in the signed social network. The solution that minimizes the Ising problem
will assign each variable a value, either -1 or 1. This bi-coloring defines the factions.
J : dict
The quadratic biases of the Ising problem.
Raises
------
ValueError
If any edge does not have a 'sign' attribute.
Examples
--------
>>> import dimod
>>> from dwave_networkx.algorithms.social import structural_imbalance_ising
...
>>> S = nx.Graph()
>>> S.add_edge('Alice', 'Bob', sign=1) # Alice and Bob are friendly
>>> S.add_edge('Alice', 'Eve', sign=-1) # Alice and Eve are hostile
>>> S.add_edge('Bob', 'Eve', sign=-1) # Bob and Eve are hostile
...
>>> h, J = structural_imbalance_ising(S)
>>> h # doctest: +SKIP
{'Alice': 0.0, 'Bob': 0.0, 'Eve': 0.0}
>>> J # doctest: +SKIP
{('Alice', 'Bob'): -1.0, ('Alice', 'Eve'): 1.0, ('Bob', 'Eve'): 1.0}
"""
h = {v: 0.0 for v in S}
J = {}
for u, v, data in S.edges(data=True):
try:
J[(u, v)] = -1. * data['sign']
except KeyError:
raise ValueError(("graph should be a signed social graph,"
"each edge should have a 'sign' attr"))
return h, J | [
"def",
"structural_imbalance_ising",
"(",
"S",
")",
":",
"h",
"=",
"{",
"v",
":",
"0.0",
"for",
"v",
"in",
"S",
"}",
"J",
"=",
"{",
"}",
"for",
"u",
",",
"v",
",",
"data",
"in",
"S",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"try",
":",
"J",
"[",
"(",
"u",
",",
"v",
")",
"]",
"=",
"-",
"1.",
"*",
"data",
"[",
"'sign'",
"]",
"except",
"KeyError",
":",
"raise",
"ValueError",
"(",
"(",
"\"graph should be a signed social graph,\"",
"\"each edge should have a 'sign' attr\"",
")",
")",
"return",
"h",
",",
"J"
] | python | Construct the Ising problem to calculate the structural imbalance of a signed social network.
A signed social network graph is a graph whose signed edges
represent friendly/hostile interactions between nodes. A
signed social network is considered balanced if it can be cleanly
divided into two factions, where all relations within a faction are
friendly, and all relations between factions are hostile. The measure
of imbalance or frustration is the minimum number of edges that
violate this rule.
Parameters
----------
S : NetworkX graph
A social graph on which each edge has a 'sign' attribute with a numeric value.
Returns
-------
h : dict
The linear biases of the Ising problem. Each variable in the Ising problem represents
a node in the signed social network. The solution that minimizes the Ising problem
will assign each variable a value, either -1 or 1. This bi-coloring defines the factions.
J : dict
The quadratic biases of the Ising problem.
Raises
------
ValueError
If any edge does not have a 'sign' attribute.
Examples
--------
>>> import dimod
>>> from dwave_networkx.algorithms.social import structural_imbalance_ising
...
>>> S = nx.Graph()
>>> S.add_edge('Alice', 'Bob', sign=1) # Alice and Bob are friendly
>>> S.add_edge('Alice', 'Eve', sign=-1) # Alice and Eve are hostile
>>> S.add_edge('Bob', 'Eve', sign=-1) # Bob and Eve are hostile
...
>>> h, J = structural_imbalance_ising(S)
>>> h # doctest: +SKIP
{'Alice': 0.0, 'Bob': 0.0, 'Eve': 0.0}
>>> J # doctest: +SKIP
{('Alice', 'Bob'): -1.0, ('Alice', 'Eve'): 1.0, ('Bob', 'Eve'): 1.0} | false |
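A quick way to sanity-check this row: brute-force the returned (h, J) Ising model on the docstring's three-node example to recover the factions. The sketch below assumes networkx is installed and substitutes itertools for a dimod sampler, so it illustrates the model rather than the library's own solving path.

```python
# Minimal sketch: rebuild the docstring example and brute-force the Ising
# model to find the minimum-energy spin assignment (the two factions).
import itertools
import networkx as nx

S = nx.Graph()
S.add_edge('Alice', 'Bob', sign=1)    # friendly
S.add_edge('Alice', 'Eve', sign=-1)   # hostile
S.add_edge('Bob', 'Eve', sign=-1)     # hostile

h = {v: 0.0 for v in S}
J = {(u, v): -1.0 * d['sign'] for u, v, d in S.edges(data=True)}

def ising_energy(spins):
    # E(s) = sum_i h_i * s_i + sum_{i,j} J_ij * s_i * s_j
    return (sum(h[v] * s for v, s in spins.items())
            + sum(Jij * spins[u] * spins[v] for (u, v), Jij in J.items()))

best = min((dict(zip(h, bits))
            for bits in itertools.product((-1, 1), repeat=len(h))),
           key=ising_energy)
print(best)  # e.g. {'Alice': -1, 'Bob': -1, 'Eve': 1}: Eve forms her own faction
```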
1,655,463 | def _remote_space_available_unix(self, search_pattern=""):
"""Return space available on *nix system (BSD/Linux)."""
self.ssh_ctl_chan._enter_shell()
remote_cmd = "/bin/df -k {}".format(self.file_system)
remote_output = self.ssh_ctl_chan.send_command(
remote_cmd, expect_string=r"[\$#]"
)
# Try to ensure parsing is correct:
# Filesystem 1K-blocks Used Avail Capacity Mounted on
# /dev/bo0s3f 1264808 16376 1147248 1% /cf/var
remote_output = remote_output.strip()
output_lines = remote_output.splitlines()
# First line is the header; second is the actual file system info
header_line = output_lines[0]
filesystem_line = output_lines[1]
if "Filesystem" not in header_line or "Avail" not in header_line.split()[3]:
# Filesystem 1K-blocks Used Avail Capacity Mounted on
msg = "Parsing error, unexpected output from {}:\n{}".format(
remote_cmd, remote_output
)
raise ValueError(msg)
space_available = filesystem_line.split()[3]
if not re.search(r"^\d+$", space_available):
msg = "Parsing error, unexpected output from {}:\n{}".format(
remote_cmd, remote_output
)
raise ValueError(msg)
self.ssh_ctl_chan._return_cli()
return int(space_available) * 1024 | [
"def",
"_remote_space_available_unix",
"(",
"self",
",",
"search_pattern",
"=",
"\"\"",
")",
":",
"self",
".",
"ssh_ctl_chan",
".",
"_enter_shell",
"(",
")",
"remote_cmd",
"=",
"\"/bin/df -k {}\"",
".",
"format",
"(",
"self",
".",
"file_system",
")",
"remote_output",
"=",
"self",
".",
"ssh_ctl_chan",
".",
"send_command",
"(",
"remote_cmd",
",",
"expect_string",
"=",
"r\"[\\$#]\"",
")",
"remote_output",
"=",
"remote_output",
".",
"strip",
"(",
")",
"output_lines",
"=",
"remote_output",
".",
"splitlines",
"(",
")",
"header_line",
"=",
"output_lines",
"[",
"0",
"]",
"filesystem_line",
"=",
"output_lines",
"[",
"1",
"]",
"if",
"\"Filesystem\"",
"not",
"in",
"header_line",
"or",
"\"Avail\"",
"not",
"in",
"header_line",
".",
"split",
"(",
")",
"[",
"3",
"]",
":",
"msg",
"=",
"\"Parsing error, unexpected output from {}:\\n{}\"",
".",
"format",
"(",
"remote_cmd",
",",
"remote_output",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"space_available",
"=",
"filesystem_line",
".",
"split",
"(",
")",
"[",
"3",
"]",
"if",
"not",
"re",
".",
"search",
"(",
"r\"^\\d+$\"",
",",
"space_available",
")",
":",
"msg",
"=",
"\"Parsing error, unexpected output from {}:\\n{}\"",
".",
"format",
"(",
"remote_cmd",
",",
"remote_output",
")",
"raise",
"ValueError",
"(",
"msg",
")",
"self",
".",
"ssh_ctl_chan",
".",
"_return_cli",
"(",
")",
"return",
"int",
"(",
"space_available",
")",
"*",
"1024"
] | python | Return space available on *nix system (BSD/Linux). | false |
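The parsing steps above can be exercised without a device by feeding them the canned `/bin/df -k` output quoted in the snippet's comment; a minimal standalone sketch:

```python
# Standalone re-run of the header/field checks against canned df output.
import re

remote_output = """\
Filesystem  1K-blocks    Used   Avail Capacity  Mounted on
/dev/bo0s3f   1264808   16376 1147248     1%    /cf/var"""

header_line, filesystem_line = remote_output.strip().splitlines()
assert "Filesystem" in header_line and header_line.split()[3] == "Avail"
space_available = filesystem_line.split()[3]   # the 'Avail' column
assert re.search(r"^\d+$", space_available)
print(int(space_available) * 1024)  # 1174781952 bytes free
```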
2,643,430 | def get_help(self):
"""
Gets the help text for the command. If it's not supplied, the doc string is used.
"""
if self.help:
return self.help
elif self.__doc__ and self.__doc__.strip():
return self.__doc__.strip()
else:
return '' | [
"def",
"get_help",
"(",
"self",
")",
":",
"if",
"self",
".",
"help",
":",
"return",
"self",
".",
"help",
"elif",
"self",
".",
"__doc__",
"and",
"self",
".",
"__doc__",
".",
"strip",
"(",
")",
":",
"return",
"self",
".",
"__doc__",
".",
"strip",
"(",
")",
"else",
":",
"return",
"''"
] | python | Gets the help text for the command. If it's not supplied, the doc string is used. | false |
1,995,986 | def __init__(self, cflags):
"""Class constructor that parses the XML generator's command line
Args:
cflags (str): cflags command line arguments passed to the XML
generator
"""
super(cxx_standard, self).__init__()
self._stdcxx = None
self._is_implicit = False
for key in cxx_standard.__STD_CXX:
if key in cflags:
self._stdcxx = key
self._cplusplus = cxx_standard.__STD_CXX[key]
if not self._stdcxx:
if '-std=' in cflags:
raise RuntimeError('Unknown -std=c++xx flag used')
# Assume c++03 by default
self._stdcxx = '-std=c++03'
self._cplusplus = cxx_standard.__STD_CXX['-std=c++03']
self._is_implicit = True | [
"def",
"__init__",
"(",
"self",
",",
"cflags",
")",
":",
"super",
"(",
"cxx_standard",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_stdcxx",
"=",
"None",
"self",
".",
"_is_implicit",
"=",
"False",
"for",
"key",
"in",
"cxx_standard",
".",
"__STD_CXX",
":",
"if",
"key",
"in",
"cflags",
":",
"self",
".",
"_stdcxx",
"=",
"key",
"self",
".",
"_cplusplus",
"=",
"cxx_standard",
".",
"__STD_CXX",
"[",
"key",
"]",
"if",
"not",
"self",
".",
"_stdcxx",
":",
"if",
"'-std='",
"in",
"cflags",
":",
"raise",
"RuntimeError",
"(",
"'Unknown -std=c++xx flag used'",
")",
"self",
".",
"_stdcxx",
"=",
"'-std=c++03'",
"self",
".",
"_cplusplus",
"=",
"cxx_standard",
".",
"__STD_CXX",
"[",
"'-std=c++03'",
"]",
"self",
".",
"_is_implicit",
"=",
"True"
] | python | Class constructor that parses the XML generator's command line
Args:
cflags (str): cflags command line arguments passed to the XML
generator | false |
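The `__STD_CXX` mapping the constructor scans is private to the class and not shown in this row. A hypothetical stand-in (the flag-to-`__cplusplus` values below are conventional macro values, not taken from the source) makes the scan-then-default logic easy to follow:

```python
# Assumed mapping; the real cxx_standard.__STD_CXX is not in the snippet.
STD_CXX = {
    '-std=c++98': 199711, '-std=c++03': 199711,
    '-std=c++11': 201103, '-std=c++14': 201402,
}

def detect_standard(cflags):
    for key, cplusplus in STD_CXX.items():
        if key in cflags:
            return key, cplusplus, False            # explicit flag found
    if '-std=' in cflags:
        raise RuntimeError('Unknown -std=c++xx flag used')
    return '-std=c++03', STD_CXX['-std=c++03'], True  # implicit default

print(detect_standard('-I/usr/include -std=c++11'))  # ('-std=c++11', 201103, False)
print(detect_standard('-I/usr/include'))             # ('-std=c++03', 199711, True)
```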
1,579,554 | def build_simple_cnn_text_classifier(tok2vec, nr_class, exclusive_classes=False, **cfg):
"""
Build a simple CNN text classifier, given a token-to-vector model as inputs.
If exclusive_classes=True, a softmax non-linearity is applied, so that the
outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
is applied instead, so that outputs are in the range [0, 1].
"""
with Model.define_operators({">>": chain}):
if exclusive_classes:
output_layer = Softmax(nr_class, tok2vec.nO)
else:
output_layer = (
zero_init(Affine(nr_class, tok2vec.nO, drop_factor=0.0)) >> logistic
)
model = tok2vec >> flatten_add_lengths >> Pooling(mean_pool) >> output_layer
model.tok2vec = chain(tok2vec, flatten)
model.nO = nr_class
return model | [
"def",
"build_simple_cnn_text_classifier",
"(",
"tok2vec",
",",
"nr_class",
",",
"exclusive_classes",
"=",
"False",
",",
"**",
"cfg",
")",
":",
"with",
"Model",
".",
"define_operators",
"(",
"{",
"\">>\"",
":",
"chain",
"}",
")",
":",
"if",
"exclusive_classes",
":",
"output_layer",
"=",
"Softmax",
"(",
"nr_class",
",",
"tok2vec",
".",
"nO",
")",
"else",
":",
"output_layer",
"=",
"(",
"zero_init",
"(",
"Affine",
"(",
"nr_class",
",",
"tok2vec",
".",
"nO",
",",
"drop_factor",
"=",
"0.0",
")",
")",
">>",
"logistic",
")",
"model",
"=",
"tok2vec",
">>",
"flatten_add_lengths",
">>",
"Pooling",
"(",
"mean_pool",
")",
">>",
"output_layer",
"model",
".",
"tok2vec",
"=",
"chain",
"(",
"tok2vec",
",",
"flatten",
")",
"model",
".",
"nO",
"=",
"nr_class",
"return",
"model"
] | python | Build a simple CNN text classifier, given a token-to-vector model as inputs.
If exclusive_classes=True, a softmax non-linearity is applied, so that the
outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
is applied instead, so that outputs are in the range [0, 1]. | false |
2,278,910 | def multivariate_normal(random, mean, cov):
"""
Draw random samples from a multivariate normal distribution.
Parameters
----------
random : np.random.RandomState instance
Random state.
mean : array_like
Mean of the n-dimensional distribution.
cov : array_like
Covariance matrix of the distribution. It must be symmetric and
positive-definite for proper sampling.
Returns
-------
out : ndarray
The drawn sample.
"""
from numpy.linalg import cholesky
L = cholesky(cov)
return L @ random.randn(L.shape[0]) + mean | [
"def",
"multivariate_normal",
"(",
"random",
",",
"mean",
",",
"cov",
")",
":",
"from",
"numpy",
".",
"linalg",
"import",
"cholesky",
"L",
"=",
"cholesky",
"(",
"cov",
")",
"return",
"L",
"@",
"random",
".",
"randn",
"(",
"L",
".",
"shape",
"[",
"0",
"]",
")",
"+",
"mean"
] | python | Draw random samples from a multivariate normal distribution.
Parameters
----------
random : np.random.RandomState instance
Random state.
mean : array_like
Mean of the n-dimensional distribution.
cov : array_like
Covariance matrix of the distribution. It must be symmetric and
positive-definite for proper sampling.
Returns
-------
out : ndarray
The drawn sample. | false |
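Because the sampler is just `L @ z + mean` with `L` the Cholesky factor of the covariance, its correctness is easy to check empirically: draw many samples and compare moments. A small self-contained verification:

```python
# Empirical check of the Cholesky-based sampler.
import numpy as np

random = np.random.RandomState(0)
mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])            # symmetric positive-definite

L = np.linalg.cholesky(cov)
samples = np.array([L @ random.randn(2) + mean for _ in range(50_000)])
print(samples.mean(axis=0))             # close to [ 1. -2.]
print(np.cov(samples, rowvar=False))    # close to cov
```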
1,945,785 | def check_coin_a_phrase_from(text):
"""Check the text."""
err = "misc.illogic.coin"
msg = "You can't coin an existing phrase. Did you mean 'borrow'?"
regex = "to coin a phrase from"
return existence_check(text, [regex], err, msg, offset=1) | [
"def",
"check_coin_a_phrase_from",
"(",
"text",
")",
":",
"err",
"=",
"\"misc.illogic.coin\"",
"msg",
"=",
"\"You can't coin an existing phrase. Did you mean 'borrow'?\"",
"regex",
"=",
"\"to coin a phrase from\"",
"return",
"existence_check",
"(",
"text",
",",
"[",
"regex",
"]",
",",
"err",
",",
"msg",
",",
"offset",
"=",
"1",
")"
] | python | Check the text. | false |
1,592,792 | def _initialize(self):
"""Initialize collector worker thread, Log path will be checked first.
Records in DB backend will be cleared.
"""
if not os.path.exists(self._logdir):
raise CollectorError("Log directory %s not exists" % self._logdir)
self.logger.info("Collector started, taking %s as parent directory"
"for all job logs." % self._logdir)
# clear old records
JobRecord.objects.filter().delete()
TrialRecord.objects.filter().delete()
ResultRecord.objects.filter().delete() | [
"def",
"_initialize",
"(",
"self",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"_logdir",
")",
":",
"raise",
"CollectorError",
"(",
"\"Log directory %s not exists\"",
"%",
"self",
".",
"_logdir",
")",
"self",
".",
"logger",
".",
"info",
"(",
"\"Collector started, taking %s as parent directory\"",
"\"for all job logs.\"",
"%",
"self",
".",
"_logdir",
")",
"JobRecord",
".",
"objects",
".",
"filter",
"(",
")",
".",
"delete",
"(",
")",
"TrialRecord",
".",
"objects",
".",
"filter",
"(",
")",
".",
"delete",
"(",
")",
"ResultRecord",
".",
"objects",
".",
"filter",
"(",
")",
".",
"delete",
"(",
")"
] | python | Initialize collector worker thread, Log path will be checked first.
Records in DB backend will be cleared. | false |
2,680,156 | def swap_memory():
"""Swap system memory as a (total, used, free, sin, sout) tuple."""
mem = _psutil_mswindows.get_virtual_mem()
total = mem[2]
free = mem[3]
used = total - free
percent = usage_percent(used, total, _round=1)
return nt_swapmeminfo(total, used, free, percent, 0, 0) | [
"def",
"swap_memory",
"(",
")",
":",
"mem",
"=",
"_psutil_mswindows",
".",
"get_virtual_mem",
"(",
")",
"total",
"=",
"mem",
"[",
"2",
"]",
"free",
"=",
"mem",
"[",
"3",
"]",
"used",
"=",
"total",
"-",
"free",
"percent",
"=",
"usage_percent",
"(",
"used",
",",
"total",
",",
"_round",
"=",
"1",
")",
"return",
"nt_swapmeminfo",
"(",
"total",
",",
"used",
",",
"free",
",",
"percent",
",",
"0",
",",
"0",
")"
] | python | Swap system memory as a (total, used, free, sin, sout) tuple. | false |
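`usage_percent` is a psutil helper that this row does not include; a plausible minimal version (an assumption, not the actual implementation) shows what the percent field holds:

```python
# Hypothetical stand-in for psutil's usage_percent helper.
def usage_percent(used, total, _round=None):
    try:
        ret = (used / total) * 100
    except ZeroDivisionError:
        ret = 0.0
    return round(ret, _round) if _round is not None else ret

print(usage_percent(512, 2048, _round=1))  # 25.0
```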
2,170,453 | def make_function(function_builders, *, closure):
"""
Construct a FunctionDef AST node from a sequence of the form:
LOAD_CLOSURE, N times (when handling MAKE_CLOSURE)
BUILD_TUPLE(N) (when handling MAKE_CLOSURE)
<decorator builders> (optional)
<default builders>, (optional)
<annotation builders> (optional)
LOAD_CONST(<tuple of annotated names>) (optional)
LOAD_CONST(code),
LOAD_CONST(name),
MAKE_FUNCTION | MAKE_CLOSURE
<decorator calls> (optional)
"""
decorator_calls = deque()
while isinstance(function_builders[-1], instrs.CALL_FUNCTION):
decorator_calls.appendleft(function_builders.pop())
*builders, load_code_instr, load_name_instr, make_function_instr = (
function_builders
)
_check_make_function_instrs(
load_code_instr, load_name_instr, make_function_instr,
)
co = load_code_instr.arg
name = load_name_instr.arg
args, kwonly, varargs, varkwargs = paramnames(co)
# Convert default and annotation builders to AST nodes.
defaults, kw_defaults, annotations = make_defaults_and_annotations(
make_function_instr,
builders,
)
# Convert decorator function builders. The stack is in reverse order.
decorators = [make_expr(builders) for _ in decorator_calls]
decorators.reverse()
if closure:
# There should be a tuple of closure cells still on the stack here.
# These don't appear in the AST, but we need to consume them to ensure
# correctness down the line.
closure_cells = make_closure_cells(builders) # noqa
# We should have consumed all our builders by this point.
if builders:
raise DecompilationError(
"Unexpected leftover builders for %s: %s." % (
make_function_instr, builders
)
)
return ast.FunctionDef(
body_code=co,
name=name.split('.')[-1],
args=make_function_arguments(
args,
kwonly,
varargs,
varkwargs,
defaults,
kw_defaults,
annotations,
),
body=pycode_to_body(co, DecompilationContext(in_function_block=True)),
decorator_list=decorators,
returns=annotations.get('return'),
) | [
"def",
"make_function",
"(",
"function_builders",
",",
"*",
",",
"closure",
")",
":",
"decorator_calls",
"=",
"deque",
"(",
")",
"while",
"isinstance",
"(",
"function_builders",
"[",
"-",
"1",
"]",
",",
"instrs",
".",
"CALL_FUNCTION",
")",
":",
"decorator_calls",
".",
"appendleft",
"(",
"function_builders",
".",
"pop",
"(",
")",
")",
"*",
"builders",
",",
"load_code_instr",
",",
"load_name_instr",
",",
"make_function_instr",
"=",
"(",
"function_builders",
")",
"_check_make_function_instrs",
"(",
"load_code_instr",
",",
"load_name_instr",
",",
"make_function_instr",
",",
")",
"co",
"=",
"load_code_instr",
".",
"arg",
"name",
"=",
"load_name_instr",
".",
"arg",
"args",
",",
"kwonly",
",",
"varargs",
",",
"varkwargs",
"=",
"paramnames",
"(",
"co",
")",
"defaults",
",",
"kw_defaults",
",",
"annotations",
"=",
"make_defaults_and_annotations",
"(",
"make_function_instr",
",",
"builders",
",",
")",
"decorators",
"=",
"[",
"make_expr",
"(",
"builders",
")",
"for",
"_",
"in",
"decorator_calls",
"]",
"decorators",
".",
"reverse",
"(",
")",
"if",
"closure",
":",
"closure_cells",
"=",
"make_closure_cells",
"(",
"builders",
")",
"if",
"builders",
":",
"raise",
"DecompilationError",
"(",
"\"Unexpected leftover builders for %s: %s.\"",
"%",
"(",
"make_function_instr",
",",
"builders",
")",
")",
"return",
"ast",
".",
"FunctionDef",
"(",
"body_code",
"=",
"co",
",",
"name",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
",",
"args",
"=",
"make_function_arguments",
"(",
"args",
",",
"kwonly",
",",
"varargs",
",",
"varkwargs",
",",
"defaults",
",",
"kw_defaults",
",",
"annotations",
",",
")",
",",
"body",
"=",
"pycode_to_body",
"(",
"co",
",",
"DecompilationContext",
"(",
"in_function_block",
"=",
"True",
")",
")",
",",
"decorator_list",
"=",
"decorators",
",",
"returns",
"=",
"annotations",
".",
"get",
"(",
"'return'",
")",
",",
")"
] | python | Construct a FunctionDef AST node from a sequence of the form:
LOAD_CLOSURE, N times (when handling MAKE_CLOSURE)
BUILD_TUPLE(N) (when handling MAKE_CLOSURE)
<decorator builders> (optional)
<default builders>, (optional)
<annotation builders> (optional)
LOAD_CONST(<tuple of annotated names>) (optional)
LOAD_CONST(code),
LOAD_CONST(name),
MAKE_FUNCTION | MAKE_CLOSURE
<decorator calls> (optional) | false |
2,688,296 | def show_port(self, port, **_params):
"""Fetches information of a certain port."""
return self.get(self.port_path % (port), params=_params) | [
"def",
"show_port",
"(",
"self",
",",
"port",
",",
"**",
"_params",
")",
":",
"return",
"self",
".",
"get",
"(",
"self",
".",
"port_path",
"%",
"(",
"port",
")",
",",
"params",
"=",
"_params",
")"
] | python | Fetches information of a certain port. | false |
1,999,488 | def paint_single_path(self, gstate, stroke, fill, evenodd, path):
"""
Converts a single path draw command into line and curve objects
"""
if len(path) < 2:
return
shape = "".join(x[0] for x in path)
pts = []
for p in path:
for i in range(1, len(p), 2):
pts.append(apply_matrix_pt(self.ctm, (p[i], p[i + 1])))
# Line mode
if self.line_only_shape.match(shape):
# check for sloped lines first
has_slope = False
for i in range(len(pts) - 1):
if pts[i][0] != pts[i + 1][0] and pts[i][1] != pts[i + 1][1]:
has_slope = True
break
if not has_slope:
for i in range(len(pts) - 1):
self.cur_item.add(LTLine(gstate.linewidth, pts[i], pts[i + 1]))
# Adding the closing line for a polygon, especially rectangles
if shape.endswith("h"):
self.cur_item.add(LTLine(gstate.linewidth, pts[0], pts[-1]))
return
# Add the curve as an arbitrary polyline (Bézier curve info is lost here)
self.cur_item.add(LTCurve(gstate.linewidth, pts)) | [
"def",
"paint_single_path",
"(",
"self",
",",
"gstate",
",",
"stroke",
",",
"fill",
",",
"evenodd",
",",
"path",
")",
":",
"if",
"len",
"(",
"path",
")",
"<",
"2",
":",
"return",
"shape",
"=",
"\"\"",
".",
"join",
"(",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"path",
")",
"pts",
"=",
"[",
"]",
"for",
"p",
"in",
"path",
":",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"p",
")",
",",
"2",
")",
":",
"pts",
".",
"append",
"(",
"apply_matrix_pt",
"(",
"self",
".",
"ctm",
",",
"(",
"p",
"[",
"i",
"]",
",",
"p",
"[",
"i",
"+",
"1",
"]",
")",
")",
")",
"if",
"self",
".",
"line_only_shape",
".",
"match",
"(",
"shape",
")",
":",
"has_slope",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"pts",
")",
"-",
"1",
")",
":",
"if",
"pts",
"[",
"i",
"]",
"[",
"0",
"]",
"!=",
"pts",
"[",
"i",
"+",
"1",
"]",
"[",
"0",
"]",
"and",
"pts",
"[",
"i",
"]",
"[",
"1",
"]",
"!=",
"pts",
"[",
"i",
"+",
"1",
"]",
"[",
"1",
"]",
":",
"has_slope",
"=",
"True",
"break",
"if",
"not",
"has_slope",
":",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"pts",
")",
"-",
"1",
")",
":",
"self",
".",
"cur_item",
".",
"add",
"(",
"LTLine",
"(",
"gstate",
".",
"linewidth",
",",
"pts",
"[",
"i",
"]",
",",
"pts",
"[",
"i",
"+",
"1",
"]",
")",
")",
"if",
"shape",
".",
"endswith",
"(",
"\"h\"",
")",
":",
"self",
".",
"cur_item",
".",
"add",
"(",
"LTLine",
"(",
"gstate",
".",
"linewidth",
",",
"pts",
"[",
"0",
"]",
",",
"pts",
"[",
"-",
"1",
"]",
")",
")",
"return",
"self",
".",
"cur_item",
".",
"add",
"(",
"LTCurve",
"(",
"gstate",
".",
"linewidth",
",",
"pts",
")",
")"
] | python | Converts a single path draw command into line and curve objects | false |
2,255,637 | def with_indents(self, s, indent=0, stacklevel=3):
"""
Substitute a string with the indented :attr:`params`
Parameters
----------
s: str
The string in which to substitute
indent: int
The number of spaces that the substitution should be indented
stacklevel: int
The stacklevel for the warning raised in :func:`safe_modulo` when
encountering an invalid key in the string
Returns
-------
str
The substituted string
See Also
--------
with_indent, dedents"""
# we make a new dictionary with objects that indent the original
# strings if necessary. Note that the first line is not indented
d = {key: _StrWithIndentation(val, indent)
for key, val in six.iteritems(self.params)}
return safe_modulo(s, d, stacklevel=stacklevel) | [
"def",
"with_indents",
"(",
"self",
",",
"s",
",",
"indent",
"=",
"0",
",",
"stacklevel",
"=",
"3",
")",
":",
"d",
"=",
"{",
"key",
":",
"_StrWithIndentation",
"(",
"val",
",",
"indent",
")",
"for",
"key",
",",
"val",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"params",
")",
"}",
"return",
"safe_modulo",
"(",
"s",
",",
"d",
",",
"stacklevel",
"=",
"stacklevel",
")"
] | python | Substitute a string with the indented :attr:`params`
Parameters
----------
s: str
The string in which to substitute
indent: int
The number of spaces that the substitution should be indented
stacklevel: int
The stacklevel for the warning raised in :func:`safe_module` when
encountering an invalid key in the string
Returns
-------
str
The substituted string
See Also
--------
with_indent, dedents | false |
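`safe_modulo` and `_StrWithIndentation` belong to the docrep package and are not part of this row; the stubs below are assumptions that reproduce the key idea, namely that `%`-substitution indents every line of the value except the first:

```python
# Sketch: indentation-aware %-substitution with a str() wrapper.
class StrWithIndentation:
    def __init__(self, s, indent=0):
        self._str, self._indent = s, ' ' * indent
    def __str__(self):
        # Indent continuation lines only; the first line is left alone.
        return ('\n' + self._indent).join(self._str.splitlines())

params = {'parameters': 'x : int\n    The input value'}
d = {key: StrWithIndentation(val, indent=4) for key, val in params.items()}
print("def f(x):\n    '''\n    %(parameters)s\n    '''" % d)
```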
2,644,216 | def train_test_split(self):
'''
Get a new train-test split of the Corpus' dialogues (each train and
test sample set is represented by a SampleSet object)
'''
self.log('info', 'Splitting the corpus into train/test subsets ...')
# grab some hyperparameters from our config
split = self.config['train-test-split']
rand_state = self.config['random-state']
# split the corpus into train and test samples
# train, test = train_test_split(self.dialogues, train_size=split, random_state=rand_state)
shuffled = np.random.permutation(self.dialogues)
split_index = ceil(split * len(self.dialogues))
train, test = np.split(shuffled, [split_index])
train = np.copy(train)
test = np.copy(test)
train = SampleSet(train, properties=self.properties, enc_dec_splitter=self.enc_dec_splitter)
train.prepare_sampleset()
test = SampleSet(test, properties=self.properties, enc_dec_splitter=self.enc_dec_splitter)
test.prepare_sampleset()
return train, test | [
"def",
"train_test_split",
"(",
"self",
")",
":",
"self",
".",
"log",
"(",
"'info'",
",",
"'Splitting the corpus into train/test subsets ...'",
")",
"split",
"=",
"self",
".",
"config",
"[",
"'train-test-split'",
"]",
"rand_state",
"=",
"self",
".",
"config",
"[",
"'random-state'",
"]",
"shuffled",
"=",
"np",
".",
"random",
".",
"permutation",
"(",
"self",
".",
"dialogues",
")",
"split_index",
"=",
"ceil",
"(",
"split",
"*",
"len",
"(",
"self",
".",
"dialogues",
")",
")",
"train",
",",
"test",
"=",
"np",
".",
"split",
"(",
"shuffled",
",",
"[",
"split_index",
"]",
")",
"train",
"=",
"np",
".",
"copy",
"(",
"train",
")",
"test",
"=",
"np",
".",
"copy",
"(",
"test",
")",
"train",
"=",
"SampleSet",
"(",
"train",
",",
"properties",
"=",
"self",
".",
"properties",
",",
"enc_dec_splitter",
"=",
"self",
".",
"enc_dec_splitter",
")",
"train",
".",
"prepare_sampleset",
"(",
")",
"test",
"=",
"SampleSet",
"(",
"test",
",",
"properties",
"=",
"self",
".",
"properties",
",",
"enc_dec_splitter",
"=",
"self",
".",
"enc_dec_splitter",
")",
"test",
".",
"prepare_sampleset",
"(",
")",
"return",
"train",
",",
"test"
] | python | Get a new train-test split of the Corpus' dialogues (each train and
test sample set is represented by a SampleSet object) | false |
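The shuffle-and-cut at the heart of this method is easy to isolate: `np.random.permutation` shuffles, and `np.split` cuts at `ceil(split * n)`. A minimal demonstration:

```python
# The train/test cut in isolation.
from math import ceil
import numpy as np

dialogues = np.arange(10)
split = 0.8
shuffled = np.random.permutation(dialogues)
train, test = np.split(shuffled, [ceil(split * len(dialogues))])
print(len(train), len(test))  # 8 2
```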
2,708,297 | def lock(resources, *args, **kwargs):
""" Lock resources from the command line, for example for maintenance. """
# all resources are locked if nothing is specified
if not resources:
client = redis.Redis(decode_responses=True, **kwargs)
resources = find_resources(client)
if not resources:
return
# create one process per pid
locker = Locker(**kwargs)
while len(resources) > 1:
pid = os.fork()
resources = resources[:1] if pid else resources[1:]
# at this point there is only one resource - lock it down
resource = resources[0]
try:
print('{}: acquiring'.format(resource))
with locker.lock(resource, label='lock tool'):
print('{}: locked'.format(resource))
try:
signal.pause()
except KeyboardInterrupt:
print('{}: released'.format(resource))
except KeyboardInterrupt:
print('{}: canceled'.format(resource)) | [
"def",
"lock",
"(",
"resources",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"not",
"resources",
":",
"client",
"=",
"redis",
".",
"Redis",
"(",
"decode_responses",
"=",
"True",
",",
"**",
"kwargs",
")",
"resources",
"=",
"find_resources",
"(",
"client",
")",
"if",
"not",
"resources",
":",
"return",
"locker",
"=",
"Locker",
"(",
"**",
"kwargs",
")",
"while",
"len",
"(",
"resources",
")",
">",
"1",
":",
"pid",
"=",
"os",
".",
"fork",
"(",
")",
"resources",
"=",
"resources",
"[",
":",
"1",
"]",
"if",
"pid",
"else",
"resources",
"[",
"1",
":",
"]",
"resource",
"=",
"resources",
"[",
"0",
"]",
"try",
":",
"print",
"(",
"'{}: acquiring'",
".",
"format",
"(",
"resource",
")",
")",
"with",
"locker",
".",
"lock",
"(",
"resource",
",",
"label",
"=",
"'lock tool'",
")",
":",
"print",
"(",
"'{}: locked'",
".",
"format",
"(",
"resource",
")",
")",
"try",
":",
"signal",
".",
"pause",
"(",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'{}: released'",
".",
"format",
"(",
"resource",
")",
")",
"except",
"KeyboardInterrupt",
":",
"print",
"(",
"'{}: canceled'",
".",
"format",
"(",
"resource",
")",
")"
] | python | Lock resources from the command line, for example for maintenance. | false |
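The fork fan-out in this tool is a neat pattern: each iteration forks once, the parent keeps the first resource, and the child keeps the rest, until every process holds exactly one. A stripped-down, Unix-only sketch:

```python
# Fork fan-out: one process per resource (requires os.fork, so Unix only).
import os

resources = ['db', 'cache', 'queue']
while len(resources) > 1:
    pid = os.fork()
    resources = resources[:1] if pid else resources[1:]
print(os.getpid(), resources[0])  # each of the three processes prints one line
```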
2,250,084 | def success(text):
'''Display a success message'''
print(' '.join((green('✔'), white(text))))
sys.stdout.flush() | [
"def",
"success",
"(",
"text",
")",
":",
"print",
"(",
"' '",
".",
"join",
"(",
"(",
"green",
"(",
"'✔'",
")",
",",
"white",
"(",
"text",
")",
")",
")",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")"
] | python | Display a success message | false |
2,242,760 | def _conversion(self, input_signal, vad_file):
"""
Converts an external VAD to follow the Spear convention.
Energy is used in order to avoind out-of-bound array indexes.
"""
e = bob.ap.Energy(rate_wavsample[0], self.win_length_ms, self.win_shift_ms)
energy_array = e(rate_wavsample[1])
labels = self.use_existing_vad(energy_array, vad_file)
return labels | [
"def",
"_conversion",
"(",
"self",
",",
"input_signal",
",",
"vad_file",
")",
":",
"e",
"=",
"bob",
".",
"ap",
".",
"Energy",
"(",
"rate_wavsample",
"[",
"0",
"]",
",",
"self",
".",
"win_length_ms",
",",
"self",
".",
"win_shift_ms",
")",
"energy_array",
"=",
"e",
"(",
"rate_wavsample",
"[",
"1",
"]",
")",
"labels",
"=",
"self",
".",
"use_existing_vad",
"(",
"energy_array",
",",
"vad_file",
")",
"return",
"labels"
] | python | Converts an external VAD to follow the Spear convention.
Energy is used in order to avoid out-of-bounds array indexes. | false |
2,349,792 | def handleException(self, exc_type, exc_param, exc_tb):
"Exception handler (False to abort)"
self.writeline(''.join( traceback.format_exception(exc_type, exc_param, exc_tb) ))
return True | [
"def",
"handleException",
"(",
"self",
",",
"exc_type",
",",
"exc_param",
",",
"exc_tb",
")",
":",
"self",
".",
"writeline",
"(",
"''",
".",
"join",
"(",
"traceback",
".",
"format_exception",
"(",
"exc_type",
",",
"exc_param",
",",
"exc_tb",
")",
")",
")",
"return",
"True"
] | python | Exception handler (False to abort) | false |
2,167,875 | def create(self, product, data, store_view=None, identifierType=None):
"""
Upload a new product image.
:param product: ID or SKU of product
:param data: `dict` of image data (label, position, exclude, types)
Example: { 'label': 'description of photo',
'position': '1', 'exclude': '0',
'types': ['image', 'small_image', 'thumbnail']}
:param store_view: Store view ID or Code
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: string - image file name
"""
return self.call('catalog_product_attribute_media.create',
[product, data, store_view, identifierType]) | [
"def",
"create",
"(",
"self",
",",
"product",
",",
"data",
",",
"store_view",
"=",
"None",
",",
"identifierType",
"=",
"None",
")",
":",
"return",
"self",
".",
"call",
"(",
"'catalog_product_attribute_media.create'",
",",
"[",
"product",
",",
"data",
",",
"store_view",
",",
"identifierType",
"]",
")"
] | python | Upload a new product image.
:param product: ID or SKU of product
:param data: `dict` of image data (label, position, exclude, types)
Example: { 'label': 'description of photo',
'position': '1', 'exclude': '0',
'types': ['image', 'small_image', 'thumbnail']}
:param store_view: Store view ID or Code
:param identifierType: Defines whether the product or SKU value is
passed in the "product" parameter.
:return: string - image file name | false |
1,871,576 | def set_version(self, version):
"""
Set the version subfield (RFC 2459, section 4.1.2.1) of the certificate
request.
:param int version: The version number.
:return: ``None``
"""
set_result = _lib.X509_REQ_set_version(self._req, version)
_openssl_assert(set_result == 1) | [
"def",
"set_version",
"(",
"self",
",",
"version",
")",
":",
"set_result",
"=",
"_lib",
".",
"X509_REQ_set_version",
"(",
"self",
".",
"_req",
",",
"version",
")",
"_openssl_assert",
"(",
"set_result",
"==",
"1",
")"
] | python | Set the version subfield (RFC 2459, section 4.1.2.1) of the certificate
request.
:param int version: The version number.
:return: ``None`` | false |
2,283,735 | def _get_file_sha1(file):
"""Return the SHA1 hash of the given a file-like object as ``file``.
This will seek the file back to 0 when it's finished.
"""
bits = file.read()
file.seek(0)
h = hashlib.new('sha1', bits).hexdigest()
return h | [
"def",
"_get_file_sha1",
"(",
"file",
")",
":",
"bits",
"=",
"file",
".",
"read",
"(",
")",
"file",
".",
"seek",
"(",
"0",
")",
"h",
"=",
"hashlib",
".",
"new",
"(",
"'sha1'",
",",
"bits",
")",
".",
"hexdigest",
"(",
")",
"return",
"h"
] | python | Return the SHA1 hash of the given file-like object ``file``.
This will seek the file back to 0 when it's finished. | false |
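The helper's contract, hash the full contents and restore the read position, can be demonstrated with an in-memory file:

```python
# Demonstrating _get_file_sha1 on an in-memory file object.
import hashlib
import io

def _get_file_sha1(file):
    bits = file.read()
    file.seek(0)
    return hashlib.new('sha1', bits).hexdigest()

f = io.BytesIO(b"hello world")
print(_get_file_sha1(f))  # 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
print(f.tell())           # 0, position restored
```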
2,260,427 | def _tree_load_sub_branch(self, traj_node, branch_name,
load_data=pypetconstants.LOAD_DATA,
with_links=True, recursive=False,
max_depth=None, _trajectory=None,
_as_new=False, _hdf5_group=None):
"""Loads data starting from a node along a branch and starts recursively loading
all data at end of branch.
:param traj_node: The node from where loading starts
:param branch_name:
A branch along which loading progresses. Colon Notation is used:
'group1.group2.group3' loads 'group1', then 'group2', then 'group3' and then finally
recursively all children and children's children below 'group3'
:param load_data:
How to load the data
:param with_links:
If links should be loaded
:param recursive:
If loading recursively
:param max_depth:
The maximum depth to load the tree
:param _trajectory:
The trajectory
:param _as_new:
If trajectory is loaded as new
:param _hdf5_group:
HDF5 node in the file corresponding to `traj_node`.
"""
if load_data == pypetconstants.LOAD_NOTHING:
return
if max_depth is None:
max_depth = float('inf')
if _trajectory is None:
_trajectory = traj_node.v_root
if _hdf5_group is None:
hdf5_group_name = traj_node.v_full_name.replace('.', '/')
# Get child node to load
if hdf5_group_name == '':
_hdf5_group = self._trajectory_group
else:
try:
_hdf5_group = self._hdf5file.get_node(where=self._trajectory_group,
name=hdf5_group_name)
except pt.NoSuchNodeError:
self._logger.error('Cannot find `%s` the hdf5 node `%s` does not exist!'
% (traj_node.v_full_name, hdf5_group_name))
raise
split_names = branch_name.split('.')
final_group_name = split_names.pop()
current_depth = 1
for name in split_names:
if current_depth > max_depth:
return
# First load along the branch
_hdf5_group = getattr(_hdf5_group, name)
self._tree_load_nodes_dfs(traj_node, load_data=load_data, with_links=with_links,
recursive=False, max_depth=max_depth, current_depth=current_depth,
trajectory=_trajectory, as_new=_as_new,
hdf5_group=_hdf5_group)
current_depth += 1
traj_node = traj_node._children[name]
if current_depth <= max_depth:
# Then load recursively all data in the last group and below
_hdf5_group = getattr(_hdf5_group, final_group_name)
self._tree_load_nodes_dfs(traj_node, load_data=load_data, with_links=with_links,
recursive=recursive, max_depth=max_depth,
current_depth=current_depth, trajectory=_trajectory,
as_new=_as_new, hdf5_group=_hdf5_group) | [
"def",
"_tree_load_sub_branch",
"(",
"self",
",",
"traj_node",
",",
"branch_name",
",",
"load_data",
"=",
"pypetconstants",
".",
"LOAD_DATA",
",",
"with_links",
"=",
"True",
",",
"recursive",
"=",
"False",
",",
"max_depth",
"=",
"None",
",",
"_trajectory",
"=",
"None",
",",
"_as_new",
"=",
"False",
",",
"_hdf5_group",
"=",
"None",
")",
":",
"if",
"load_data",
"==",
"pypetconstants",
".",
"LOAD_NOTHING",
":",
"return",
"if",
"max_depth",
"is",
"None",
":",
"max_depth",
"=",
"float",
"(",
"'inf'",
")",
"if",
"_trajectory",
"is",
"None",
":",
"_trajectory",
"=",
"traj_node",
".",
"v_root",
"if",
"_hdf5_group",
"is",
"None",
":",
"hdf5_group_name",
"=",
"traj_node",
".",
"v_full_name",
".",
"replace",
"(",
"'.'",
",",
"'/'",
")",
"if",
"hdf5_group_name",
"==",
"''",
":",
"_hdf5_group",
"=",
"self",
".",
"_trajectory_group",
"else",
":",
"try",
":",
"_hdf5_group",
"=",
"self",
".",
"_hdf5file",
".",
"get_node",
"(",
"where",
"=",
"self",
".",
"_trajectory_group",
",",
"name",
"=",
"hdf5_group_name",
")",
"except",
"pt",
".",
"NoSuchNodeError",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"'Cannot find `%s` the hdf5 node `%s` does not exist!'",
"%",
"(",
"traj_node",
".",
"v_full_name",
",",
"hdf5_group_name",
")",
")",
"raise",
"split_names",
"=",
"branch_name",
".",
"split",
"(",
"'.'",
")",
"final_group_name",
"=",
"split_names",
".",
"pop",
"(",
")",
"current_depth",
"=",
"1",
"for",
"name",
"in",
"split_names",
":",
"if",
"current_depth",
">",
"max_depth",
":",
"return",
"_hdf5_group",
"=",
"getattr",
"(",
"_hdf5_group",
",",
"name",
")",
"self",
".",
"_tree_load_nodes_dfs",
"(",
"traj_node",
",",
"load_data",
"=",
"load_data",
",",
"with_links",
"=",
"with_links",
",",
"recursive",
"=",
"False",
",",
"max_depth",
"=",
"max_depth",
",",
"current_depth",
"=",
"current_depth",
",",
"trajectory",
"=",
"_trajectory",
",",
"as_new",
"=",
"_as_new",
",",
"hdf5_group",
"=",
"_hdf5_group",
")",
"current_depth",
"+=",
"1",
"traj_node",
"=",
"traj_node",
".",
"_children",
"[",
"name",
"]",
"if",
"current_depth",
"<=",
"max_depth",
":",
"_hdf5_group",
"=",
"getattr",
"(",
"_hdf5_group",
",",
"final_group_name",
")",
"self",
".",
"_tree_load_nodes_dfs",
"(",
"traj_node",
",",
"load_data",
"=",
"load_data",
",",
"with_links",
"=",
"with_links",
",",
"recursive",
"=",
"recursive",
",",
"max_depth",
"=",
"max_depth",
",",
"current_depth",
"=",
"current_depth",
",",
"trajectory",
"=",
"_trajectory",
",",
"as_new",
"=",
"_as_new",
",",
"hdf5_group",
"=",
"_hdf5_group",
")"
] | python | Loads data starting from a node along a branch and starts recursively loading
all data at end of branch.
:param traj_node: The node from where loading starts
:param branch_name:
A branch along which loading progresses. Colon Notation is used:
'group1.group2.group3' loads 'group1', then 'group2', then 'group3' and then finally
recursively all children and children's children below 'group3'
:param load_data:
How to load the data
:param with_links:
If links should be loaded
:param recursive:
If loading recursively
:param max_depth:
The maximum depth to load the tree
:param _trajectory:
The trajectory
:param _as_new:
If trajectory is loaded as new
:param _hdf5_group:
HDF5 node in the file corresponding to `traj_node`. | false |
2,161,287 | def _swap_slice_indices(self, slc, make_slice=False):
'''Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
'''
try:
start = slc.start
stop = slc.stop
slc_step = slc.step
except AttributeError:
if make_slice:
if slc < 0:
slc += self.length()
return slice(slc, slc + 1)
else:
return slc
else:
if not start and start != 0:
slc_stop = self.length()
elif start < 0:
slc_stop = self.length() + start + 1
else:
slc_stop = start + 1
if not stop and stop != 0:
slc_start = 0
elif stop < 0:
slc_start = self.length() + stop
else:
slc_start = stop
return slice(slc_start, slc_stop, slc_step) | [
"def",
"_swap_slice_indices",
"(",
"self",
",",
"slc",
",",
"make_slice",
"=",
"False",
")",
":",
"try",
":",
"start",
"=",
"slc",
".",
"start",
"stop",
"=",
"slc",
".",
"stop",
"slc_step",
"=",
"slc",
".",
"step",
"except",
"AttributeError",
":",
"if",
"make_slice",
":",
"if",
"slc",
"<",
"0",
":",
"slc",
"+=",
"self",
".",
"length",
"(",
")",
"return",
"slice",
"(",
"slc",
",",
"slc",
"+",
"1",
")",
"else",
":",
"return",
"slc",
"else",
":",
"if",
"not",
"start",
"and",
"start",
"!=",
"0",
":",
"slc_stop",
"=",
"self",
".",
"length",
"(",
")",
"elif",
"start",
"<",
"0",
":",
"slc_stop",
"=",
"self",
".",
"length",
"(",
")",
"+",
"start",
"+",
"1",
"else",
":",
"slc_stop",
"=",
"start",
"+",
"1",
"if",
"not",
"stop",
"and",
"stop",
"!=",
"0",
":",
"slc_start",
"=",
"0",
"elif",
"stop",
"<",
"0",
":",
"slc_start",
"=",
"self",
".",
"length",
"(",
")",
"+",
"stop",
"else",
":",
"slc_start",
"=",
"stop",
"return",
"slice",
"(",
"slc_start",
",",
"slc_stop",
",",
"slc_step",
")"
] | python | Swap slice indices
Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing. | false |
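A worked example helps here: a Verilog part-select `[7:4]` arrives as `slice(7, 4)` (msb first) and must become Python's `slice(4, 8)`. The sketch below condenses the slice branch of the method for integer or `None` indices on a 16-bit signal:

```python
# Verilog [7:4] on a 16-bit signal -> Python slice(4, 8).
length = 16                 # stands in for self.length()
slc = slice(7, 4)           # Verilog-style [msb:lsb]

start, stop = slc.start, slc.stop
slc_stop = length if start is None else (length + start + 1 if start < 0 else start + 1)
slc_start = 0 if stop is None else (length + stop if stop < 0 else stop)
print(slice(slc_start, slc_stop, slc.step))  # slice(4, 8, None)
```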
1,938,915 | def from_array(self, array, grid='DH', copy=True):
"""
Initialize the class instance from an input array.
Usage
-----
x = SHGrid.from_array(array, [grid, copy])
Returns
-------
x : SHGrid class instance
Parameters
----------
array : ndarray, shape (nlat, nlon)
2-D numpy array of the gridded data, where nlat and nlon are the
number of latitudinal and longitudinal bands, respectively.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss Legendre
Quadrature grids, respectively.
copy : bool, optional, default = True
If True (default), make a copy of array when initializing the class
instance. If False, initialize the class instance with a reference
to array.
"""
if _np.iscomplexobj(array):
kind = 'complex'
else:
kind = 'real'
if type(grid) != str:
raise ValueError('grid must be a string. ' +
'Input type was {:s}'
.format(str(type(grid))))
if grid.upper() not in set(['DH', 'GLQ']):
raise ValueError(
"grid must be 'DH' or 'GLQ'. Input value was {:s}."
.format(repr(grid))
)
for cls in self.__subclasses__():
if cls.istype(kind) and cls.isgrid(grid):
return cls(array, copy=copy) | [
"def",
"from_array",
"(",
"self",
",",
"array",
",",
"grid",
"=",
"'DH'",
",",
"copy",
"=",
"True",
")",
":",
"if",
"_np",
".",
"iscomplexobj",
"(",
"array",
")",
":",
"kind",
"=",
"'complex'",
"else",
":",
"kind",
"=",
"'real'",
"if",
"type",
"(",
"grid",
")",
"!=",
"str",
":",
"raise",
"ValueError",
"(",
"'grid must be a string. '",
"+",
"'Input type was {:s}'",
".",
"format",
"(",
"str",
"(",
"type",
"(",
"grid",
")",
")",
")",
")",
"if",
"grid",
".",
"upper",
"(",
")",
"not",
"in",
"set",
"(",
"[",
"'DH'",
",",
"'GLQ'",
"]",
")",
":",
"raise",
"ValueError",
"(",
"\"grid must be 'DH' or 'GLQ'. Input value was {:s}.\"",
".",
"format",
"(",
"repr",
"(",
"grid",
")",
")",
")",
"for",
"cls",
"in",
"self",
".",
"__subclasses__",
"(",
")",
":",
"if",
"cls",
".",
"istype",
"(",
"kind",
")",
"and",
"cls",
".",
"isgrid",
"(",
"grid",
")",
":",
"return",
"cls",
"(",
"array",
",",
"copy",
"=",
"copy",
")"
] | python | Initialize the class instance from an input array.
Usage
-----
x = SHGrid.from_array(array, [grid, copy])
Returns
-------
x : SHGrid class instance
Parameters
----------
array : ndarray, shape (nlat, nlon)
2-D numpy array of the gridded data, where nlat and nlon are the
number of latitudinal and longitudinal bands, respectively.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss Legendre
Quadrature grids, respectively.
copy : bool, optional, default = True
If True (default), make a copy of array when initializing the class
instance. If False, initialize the class instance with a reference
to array. | false |
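The final loop is a subclass-dispatch factory: the base class picks whichever subclass claims the (kind, grid) pair. The stand-in classes below are assumptions that isolate just that pattern:

```python
# Subclass dispatch on (kind, grid) predicates, as in from_array's tail.
class Grid:
    @classmethod
    def dispatch(cls, kind, grid):
        for sub in cls.__subclasses__():
            if sub.istype(kind) and sub.isgrid(grid):
                return sub

class RealDH(Grid):
    @staticmethod
    def istype(kind): return kind == 'real'
    @staticmethod
    def isgrid(grid): return grid.upper() == 'DH'

class ComplexGLQ(Grid):
    @staticmethod
    def istype(kind): return kind == 'complex'
    @staticmethod
    def isgrid(grid): return grid.upper() == 'GLQ'

print(Grid.dispatch('real', 'DH').__name__)  # RealDH
```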
2,190,308 | def fix_relative_url(self, publish_type, rel_url):
"""
Fix post or page relative url to a standard, uniform format.
:param publish_type: publish type ('post' or 'page')
:param rel_url: relative url to fix
:return: tuple(fixed relative url or file path if exists else None,
file exists or not)
:raise ValueError: unknown publish type
"""
if publish_type == 'post':
return self.fix_post_relative_url(rel_url), False
elif publish_type == 'page':
return self.fix_page_relative_url(rel_url)
else:
raise ValueError(
'Publish type "{}" is not supported'.format(publish_type)) | [
"def",
"fix_relative_url",
"(",
"self",
",",
"publish_type",
",",
"rel_url",
")",
":",
"if",
"publish_type",
"==",
"'post'",
":",
"return",
"self",
".",
"fix_post_relative_url",
"(",
"rel_url",
")",
",",
"False",
"elif",
"publish_type",
"==",
"'page'",
":",
"return",
"self",
".",
"fix_page_relative_url",
"(",
"rel_url",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Publish type \"{}\" is not supported'",
".",
"format",
"(",
"publish_type",
")",
")"
] | python | Fix post or page relative url to a standard, uniform format.
:param publish_type: publish type ('post' or 'page')
:param rel_url: relative url to fix
:return: tuple(fixed relative url or file path if exists else None,
file exists or not)
:raise ValueError: unknown publish type | false |
2,407,569 | def get_policy_for_vhost(self, vhost, name):
"""
Get a specific policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
"""
return self._api_get('/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
)) | [
"def",
"get_policy_for_vhost",
"(",
"self",
",",
"vhost",
",",
"name",
")",
":",
"return",
"self",
".",
"_api_get",
"(",
"'/api/policies/{0}/{1}'",
".",
"format",
"(",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"vhost",
")",
",",
"urllib",
".",
"parse",
".",
"quote_plus",
"(",
"name",
")",
",",
")",
")"
] | python | Get a specific policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str | false |
2,443,028 | def clone(self):
"""Return a new bitfield with the same value.
The returned value is a copy, and so is no longer linked to the
original bitfield. This is important when the original is located
at anything other than normal memory, with accesses to it either
slow or having side effects. Creating a clone, and working
against that clone, means that only one read will occur.
"""
temp = self.__class__()
temp.base = self.base
return temp | [
"def",
"clone",
"(",
"self",
")",
":",
"temp",
"=",
"self",
".",
"__class__",
"(",
")",
"temp",
".",
"base",
"=",
"self",
".",
"base",
"return",
"temp"
] | python | Return a new bitfield with the same value.
The returned value is a copy, and so is no longer linked to the
original bitfield. This is important when the original is located
at anything other than normal memory, with accesses to it either
slow or having side effects. Creating a clone, and working
against that clone, means that only one read will occur. | false |
2,329,400 | def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES | [
"def",
"get_confidence",
"(",
"self",
")",
":",
"if",
"self",
".",
"_mTotalChars",
"<=",
"0",
"or",
"self",
".",
"_mFreqChars",
"<=",
"MINIMUM_DATA_THRESHOLD",
":",
"return",
"SURE_NO",
"if",
"self",
".",
"_mTotalChars",
"!=",
"self",
".",
"_mFreqChars",
":",
"r",
"=",
"(",
"self",
".",
"_mFreqChars",
"/",
"(",
"(",
"self",
".",
"_mTotalChars",
"-",
"self",
".",
"_mFreqChars",
")",
"*",
"self",
".",
"_mTypicalDistributionRatio",
")",
")",
"if",
"r",
"<",
"SURE_YES",
":",
"return",
"r",
"return",
"SURE_YES"
] | python | return confidence based on existing data | false |
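Plugging numbers into the formula makes the capping behavior concrete. The constants below are assumptions chosen for illustration (in chardet-style detectors SURE_YES is typically just under 1):

```python
# Illustrative values only; the module's actual constants are not shown here.
SURE_YES, SURE_NO = 0.99, 0.01
total_chars, freq_chars, ratio = 1000, 900, 0.75

r = freq_chars / ((total_chars - freq_chars) * ratio)
print(r)                  # 12.0
print(min(r, SURE_YES))   # 0.99, capped so we are never "100% sure"
```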
2,150,855 | def full_name(self):
"""
Obtains the full name of the actor.
:return: the full name
:rtype: str
"""
if self._full_name is None:
fn = self.name.replace(".", "\\.")
parent = self._parent
if parent is not None:
fn = parent.full_name + "." + fn
self._full_name = fn
return self._full_name | [
"def",
"full_name",
"(",
"self",
")",
":",
"if",
"self",
".",
"_full_name",
"is",
"None",
":",
"fn",
"=",
"self",
".",
"name",
".",
"replace",
"(",
"\".\"",
",",
"\"\\\\.\"",
")",
"parent",
"=",
"self",
".",
"_parent",
"if",
"parent",
"is",
"not",
"None",
":",
"fn",
"=",
"parent",
".",
"full_name",
"+",
"\".\"",
"+",
"fn",
"self",
".",
"_full_name",
"=",
"fn",
"return",
"self",
".",
"_full_name"
] | python | Obtains the full name of the actor.
:return: the full name
:rtype: str | false |
1,638,672 | def write_remote_map(self):
'''
Write the remote_map.txt
'''
remote_map = salt.utils.path.join(self.cache_root, 'remote_map.txt')
try:
with salt.utils.files.fopen(remote_map, 'w+') as fp_:
timestamp = \
datetime.now().strftime('%d %b %Y %H:%M:%S.%f')
fp_.write(
'# {0}_remote map as of {1}\n'.format(
self.role,
timestamp
)
)
for repo in self.remotes:
fp_.write(
salt.utils.stringutils.to_str(
'{0} = {1}\n'.format(
repo.cachedir_basename,
repo.id
)
)
)
except OSError:
pass
else:
log.info('Wrote new %s remote map to %s', self.role, remote_map) | [
"def",
"write_remote_map",
"(",
"self",
")",
":",
"remote_map",
"=",
"salt",
".",
"utils",
".",
"path",
".",
"join",
"(",
"self",
".",
"cache_root",
",",
"'remote_map.txt'",
")",
"try",
":",
"with",
"salt",
".",
"utils",
".",
"files",
".",
"fopen",
"(",
"remote_map",
",",
"'w+'",
")",
"as",
"fp_",
":",
"timestamp",
"=",
"datetime",
".",
"now",
"(",
")",
".",
"strftime",
"(",
"'%d %b %Y %H:%M:%S.%f'",
")",
"fp_",
".",
"write",
"(",
"'# {0}_remote map as of {1}\\n'",
".",
"format",
"(",
"self",
".",
"role",
",",
"timestamp",
")",
")",
"for",
"repo",
"in",
"self",
".",
"remotes",
":",
"fp_",
".",
"write",
"(",
"salt",
".",
"utils",
".",
"stringutils",
".",
"to_str",
"(",
"'{0} = {1}\\n'",
".",
"format",
"(",
"repo",
".",
"cachedir_basename",
",",
"repo",
".",
"id",
")",
")",
")",
"except",
"OSError",
":",
"pass",
"else",
":",
"log",
".",
"info",
"(",
"'Wrote new %s remote map to %s'",
",",
"self",
".",
"role",
",",
"remote_map",
")"
] | python | Write the remote_map.txt | false |
2,070,061 | def raise_exception(entity_type, entity, exception):
""" Exception helper """
raise exception(
u'The {} you have provided is not valid: {}'.format(entity_type, entity).encode('utf-8')
) | [
"def",
"raise_exception",
"(",
"entity_type",
",",
"entity",
",",
"exception",
")",
":",
"raise",
"exception",
"(",
"u'The {} you have provided is not valid: {}'",
".",
"format",
"(",
"entity_type",
",",
"entity",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")"
] | python | Exception helper | false |
1,695,339 | def __lock_location(self) -> None:
"""
Attempts to lock the location used by this writer. Will raise an error if the location is
already locked by another writer. Will do nothing if the location is already locked by
this writer.
"""
if not self._is_active:
if self._location in LogdirWriter._locked_locations:
raise RuntimeError('TensorBoard event file in directory %s with suffix %s '
'is already in use. At present multiple TensorBoard file writers '
'cannot write data into the same file.' % self._location)
LogdirWriter._locked_locations.add(self._location)
self._is_active = True | [
"def",
"__lock_location",
"(",
"self",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"_is_active",
":",
"if",
"self",
".",
"_location",
"in",
"LogdirWriter",
".",
"_locked_locations",
":",
"raise",
"RuntimeError",
"(",
"'TensorBoard event file in directory %s with suffix %s '",
"'is already in use. At present multiple TensoBoard file writers '",
"'cannot write data into the same file.'",
"%",
"self",
".",
"_location",
")",
"LogdirWriter",
".",
"_locked_locations",
".",
"add",
"(",
"self",
".",
"_location",
")",
"self",
".",
"_is_active",
"=",
"True"
] | python | Attempts to lock the location used by this writer. Will raise an error if the location is
already locked by another writer. Will do nothing if the location is already locked by
this writer. | false |
2,278,358 | def convert_formula_to_atomic_fractions(formula):
"""
Converts a chemical formula to an atomic fraction :class:`dict`.
Args:
formula (str): chemical formula, like Al2O3. No wildcards are accepted.
"""
mole_fractions = {}
total_mole_fraction = 0.0
for match in CHEMICAL_FORMULA_PATTERN.finditer(formula):
symbol, mole_fraction = match.groups()
z = pyxray.element_atomic_number(symbol.strip())
if mole_fraction == '':
mole_fraction = 1.0
mole_fraction = float(mole_fraction)
mole_fraction = float(mole_fraction)
mole_fractions[z] = mole_fraction
total_mole_fraction += mole_fraction
# Calculate atomic fractions
atomic_fractions = {}
for z, mole_fraction in mole_fractions.items():
atomic_fractions[z] = mole_fraction / total_mole_fraction
return atomic_fractions | [
"def",
"convert_formula_to_atomic_fractions",
"(",
"formula",
")",
":",
"mole_fractions",
"=",
"{",
"}",
"total_mole_fraction",
"=",
"0.0",
"for",
"match",
"in",
"CHEMICAL_FORMULA_PATTERN",
".",
"finditer",
"(",
"formula",
")",
":",
"symbol",
",",
"mole_fraction",
"=",
"match",
".",
"groups",
"(",
")",
"z",
"=",
"pyxray",
".",
"element_atomic_number",
"(",
"symbol",
".",
"strip",
"(",
")",
")",
"if",
"mole_fraction",
"==",
"''",
":",
"mole_fraction",
"=",
"1.0",
"mole_fraction",
"=",
"float",
"(",
"mole_fraction",
")",
"mole_fraction",
"=",
"float",
"(",
"mole_fraction",
")",
"mole_fractions",
"[",
"z",
"]",
"=",
"mole_fraction",
"total_mole_fraction",
"+=",
"mole_fraction",
"atomic_fractions",
"=",
"{",
"}",
"for",
"z",
",",
"mole_fraction",
"in",
"mole_fractions",
".",
"items",
"(",
")",
":",
"atomic_fractions",
"[",
"z",
"]",
"=",
"mole_fraction",
"/",
"total_mole_fraction",
"return",
"atomic_fractions"
] | python | Converts a chemical formula to an atomic fraction :class:`dict`.
Args:
formula (str): chemical formula, like Al2O3. No wildcards are accepted. | false |
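`CHEMICAL_FORMULA_PATTERN` and the pyxray lookup are external to this row; the self-contained sketch below substitutes an assumed regex and a two-element symbol table to show the parse-then-normalize flow:

```python
# Assumed stand-ins for CHEMICAL_FORMULA_PATTERN and pyxray.
import re

CHEMICAL_FORMULA_PATTERN = re.compile(r'([A-Z][a-z]*)([0-9.]*)')
ATOMIC_NUMBERS = {'Al': 13, 'O': 8}   # tiny illustrative symbol table

def atomic_fractions(formula):
    moles = {}
    for symbol, n in CHEMICAL_FORMULA_PATTERN.findall(formula):
        moles[ATOMIC_NUMBERS[symbol]] = float(n) if n else 1.0
    total = sum(moles.values())
    return {z: m / total for z, m in moles.items()}

print(atomic_fractions('Al2O3'))  # {13: 0.4, 8: 0.6}
```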
2,216,134 | def next_iteration(self, ref_point, bounds=None):
"""
Calculate the next iteration point to be shown to the DM
Parameters
----------
ref_point : list of float
Reference point given by the DM
"""
if bounds:
self.problem.points = reachable_points(
self.problem.points, self.problem.ideal, bounds
)
if not utils.isin(self.fh, self.problem.points) or ref_point != self.ref_point:
self.ref_point = list(ref_point)
self._update_fh()
self._update_zh(self.zh, self.fh)
self.fh_lo = list(self.lower_bounds_factory.result(self.zh))
self.fh_up = list(self.upper_bounds_factory.result(self.zh))
logging.debug(f"Updated upper boundary: {self.fh_up}")
logging.debug(f"Uppadet lower boundary: {self.fh_lo}")
if not np.all(np.array(self.fh_up) > np.array(self.fh_lo)):
warn(self.NegativeIntervalWarning())
assert utils.isin(self.fh_up, self.problem.points)
assert utils.isin(self.fh_lo, self.problem.points)
dist = self.distance(self.zh, self.fh)
# Reachable points
self.update_points()
lP = len(self.problem.points)
self.current_iter -= 1
return dist, self.fh, self.zh, self.fh_lo, self.fh_up, lP | [
"def",
"next_iteration",
"(",
"self",
",",
"ref_point",
",",
"bounds",
"=",
"None",
")",
":",
"if",
"bounds",
":",
"self",
".",
"problem",
".",
"points",
"=",
"reachable_points",
"(",
"self",
".",
"problem",
".",
"points",
",",
"self",
".",
"problem",
".",
"ideal",
",",
"bounds",
")",
"if",
"not",
"utils",
".",
"isin",
"(",
"self",
".",
"fh",
",",
"self",
".",
"problem",
".",
"points",
")",
"or",
"ref_point",
"!=",
"self",
".",
"ref_point",
":",
"self",
".",
"ref_point",
"=",
"list",
"(",
"ref_point",
")",
"self",
".",
"_update_fh",
"(",
")",
"self",
".",
"_update_zh",
"(",
"self",
".",
"zh",
",",
"self",
".",
"fh",
")",
"self",
".",
"fh_lo",
"=",
"list",
"(",
"self",
".",
"lower_bounds_factory",
".",
"result",
"(",
"self",
".",
"zh",
")",
")",
"self",
".",
"fh_up",
"=",
"list",
"(",
"self",
".",
"upper_bounds_factory",
".",
"result",
"(",
"self",
".",
"zh",
")",
")",
"logging",
".",
"debug",
"(",
"f\"Updated upper boundary: {self.fh_up}\"",
")",
"logging",
".",
"debug",
"(",
"f\"Uppadet lower boundary: {self.fh_lo}\"",
")",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"array",
"(",
"self",
".",
"fh_up",
")",
">",
"np",
".",
"array",
"(",
"self",
".",
"fh_lo",
")",
")",
":",
"warn",
"(",
"self",
".",
"NegativeIntervalWarning",
"(",
")",
")",
"assert",
"utils",
".",
"isin",
"(",
"self",
".",
"fh_up",
",",
"self",
".",
"problem",
".",
"points",
")",
"assert",
"utils",
".",
"isin",
"(",
"self",
".",
"fh_lo",
",",
"self",
".",
"problem",
".",
"points",
")",
"dist",
"=",
"self",
".",
"distance",
"(",
"self",
".",
"zh",
",",
"self",
".",
"fh",
")",
"self",
".",
"update_points",
"(",
")",
"lP",
"=",
"len",
"(",
"self",
".",
"problem",
".",
"points",
")",
"self",
".",
"current_iter",
"-=",
"1",
"return",
"dist",
",",
"self",
".",
"fh",
",",
"self",
".",
"zh",
",",
"self",
".",
"fh_lo",
",",
"self",
".",
"fh_up",
",",
"lP"
] | python | Calculate the next iteration point to be shown to the DM
Parameters
----------
ref_point : list of float
Reference point given by the DM | false |
1,570,360 | def summary(self):
"""
Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__) | [
"def",
"summary",
"(",
"self",
")",
":",
"if",
"self",
".",
"hasSummary",
":",
"return",
"GaussianMixtureSummary",
"(",
"super",
"(",
"GaussianMixtureModel",
",",
"self",
")",
".",
"summary",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"No training summary available for this %s\"",
"%",
"self",
".",
"__class__",
".",
"__name__",
")"
] | python | Gets summary (e.g. cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists. | false |
1,708,003 | def get_chempot_correction(element, temp, pres):
"""
Get the normalized correction term Δμ for chemical potential of a gas
phase consisting of element at given temperature and pressure,
referenced to that in the standard state (T_std = 298.15 K,
T_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
F2, H2. Calculation formula can be found in the documentation of
Materials Project website.
Args:
element (string): The string representing the element.
temp (float): The temperature of the gas phase.
pres (float): The pressure of the gas phase.
Returns:
The correction of chemical potential in eV/atom of the gas
phase at given temperature and pressure.
"""
if element not in ["O", "N", "Cl", "F", "H"]:
return 0
std_temp = 298.15
std_pres = 1E5
ideal_gas_const = 8.3144598
# Cp and S at standard state in J/(K.mol). Data from
# https://janaf.nist.gov/tables/O-029.html
# https://janaf.nist.gov/tables/N-023.html
# https://janaf.nist.gov/tables/Cl-073.html
# https://janaf.nist.gov/tables/F-054.html
# https://janaf.nist.gov/tables/H-050.html
Cp_dict = {"O": 29.376,
"N": 29.124,
"Cl": 33.949,
"F": 31.302,
"H": 28.836}
S_dict = {"O": 205.147,
"N": 191.609,
"Cl": 223.079,
"F": 202.789,
"H": 130.680}
Cp_std = Cp_dict[element]
S_std = S_dict[element]
PV_correction = ideal_gas_const * temp * np.log(pres / std_pres)
TS_correction = - Cp_std * (temp * np.log(temp)
- std_temp * np.log(std_temp)) \
+ Cp_std * (temp - std_temp) \
* (1 + np.log(std_temp)) \
- S_std * (temp - std_temp)
dG = PV_correction + TS_correction
# Convert to eV/molecule unit.
dG /= 1000 * InterfacialReactivity.EV_TO_KJ_PER_MOL
# Normalize by number of atoms in the gas molecule. For elements
# considered, the gas molecules are all diatomic.
dG /= 2
return dG | [
"def",
"get_chempot_correction",
"(",
"element",
",",
"temp",
",",
"pres",
")",
":",
"if",
"element",
"not",
"in",
"[",
"\"O\"",
",",
"\"N\"",
",",
"\"Cl\"",
",",
"\"F\"",
",",
"\"H\"",
"]",
":",
"return",
"0",
"std_temp",
"=",
"298.15",
"std_pres",
"=",
"1E5",
"ideal_gas_const",
"=",
"8.3144598",
"Cp_dict",
"=",
"{",
"\"O\"",
":",
"29.376",
",",
"\"N\"",
":",
"29.124",
",",
"\"Cl\"",
":",
"33.949",
",",
"\"F\"",
":",
"31.302",
",",
"\"H\"",
":",
"28.836",
"}",
"S_dict",
"=",
"{",
"\"O\"",
":",
"205.147",
",",
"\"N\"",
":",
"191.609",
",",
"\"Cl\"",
":",
"223.079",
",",
"\"F\"",
":",
"202.789",
",",
"\"H\"",
":",
"130.680",
"}",
"Cp_std",
"=",
"Cp_dict",
"[",
"element",
"]",
"S_std",
"=",
"S_dict",
"[",
"element",
"]",
"PV_correction",
"=",
"ideal_gas_const",
"*",
"temp",
"*",
"np",
".",
"log",
"(",
"pres",
"/",
"std_pres",
")",
"TS_correction",
"=",
"-",
"Cp_std",
"*",
"(",
"temp",
"*",
"np",
".",
"log",
"(",
"temp",
")",
"-",
"std_temp",
"*",
"np",
".",
"log",
"(",
"std_temp",
")",
")",
"+",
"Cp_std",
"*",
"(",
"temp",
"-",
"std_temp",
")",
"*",
"(",
"1",
"+",
"np",
".",
"log",
"(",
"std_temp",
")",
")",
"-",
"S_std",
"*",
"(",
"temp",
"-",
"std_temp",
")",
"dG",
"=",
"PV_correction",
"+",
"TS_correction",
"dG",
"/=",
"1000",
"*",
"InterfacialReactivity",
".",
"EV_TO_KJ_PER_MOL",
"dG",
"/=",
"2",
"return",
"dG"
] | python | Get the normalized correction term Δμ for chemical potential of a gas
phase consisting of element at given temperature and pressure,
referenced to that in the standard state (T_std = 298.15 K,
T_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
F2, H2. Calculation formula can be found in the documentation of
Materials Project website.
Args:
element (string): The string representing the element.
temp (float): The temperature of the gas phase.
pres (float): The pressure of the gas phase.
Returns:
The correction of chemical potential in eV/atom of the gas
phase at given temperature and pressure. | false |
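A standalone numeric check of the ΔG formula documented above, specialized to O2. The 96.485 kJ/mol-per-eV constant is assumed to match InterfacialReactivity.EV_TO_KJ_PER_MOL, and pressures are in Pa (std_pres = 1e5 Pa = 1 bar), as in the record.

```python
import math

def o2_chempot_correction(temp, pres):
    """dG correction per O atom, in eV; pressures in Pa (1 bar = 1e5 Pa)."""
    std_temp, std_pres, R = 298.15, 1e5, 8.3144598
    cp_std, s_std = 29.376, 205.147                 # O2, J/(K.mol)
    pv = R * temp * math.log(pres / std_pres)
    ts = (-cp_std * (temp * math.log(temp) - std_temp * math.log(std_temp))
          + cp_std * (temp - std_temp) * (1 + math.log(std_temp))
          - s_std * (temp - std_temp))
    dg = (pv + ts) / (1000 * 96.485)                # J/mol -> kJ/mol -> eV
    return dg / 2                                   # diatomic gas -> per atom

print(round(o2_chempot_correction(1000.0, 1e2), 3))  # ~ -1.121 eV/atom
```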
2,153,958 | def cursor(self):
"""The Cursor this Token corresponds to."""
cursor = Cursor()
cursor._tu = self._tu
conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor))
return cursor | [
"def",
"cursor",
"(",
"self",
")",
":",
"cursor",
"=",
"Cursor",
"(",
")",
"cursor",
".",
"_tu",
"=",
"self",
".",
"_tu",
"conf",
".",
"lib",
".",
"clang_annotateTokens",
"(",
"self",
".",
"_tu",
",",
"byref",
"(",
"self",
")",
",",
"1",
",",
"byref",
"(",
"cursor",
")",
")",
"return",
"cursor"
] | python | The Cursor this Token corresponds to. | false |
2,305,174 | def render_audio(self):
"""
Synthesize audio from the episode's text.
"""
segment = text_to_speech(self._text, self.synthesizer, self.synth_args, self.sentence_break)
milli = len(segment)
seconds = '{0:.1f}'.format(float(milli) / 1000 % 60).zfill(2)
minutes = '{0:.0f}'.format((milli / (1000 * 60)) % 60).zfill(2)
hours = '{0:.0f}'.format((milli / (1000 * 60 * 60)) % 24).zfill(2)
self.duration = hours + ':' + minutes + ':' + seconds
segment.export(self.link, format='mp3')
self.length = os.path.getsize(self.link) | [
"def",
"render_audio",
"(",
"self",
")",
":",
"segment",
"=",
"text_to_speech",
"(",
"self",
".",
"_text",
",",
"self",
".",
"synthesizer",
",",
"self",
".",
"synth_args",
",",
"self",
".",
"sentence_break",
")",
"milli",
"=",
"len",
"(",
"segment",
")",
"seconds",
"=",
"'{0:.1f}'",
".",
"format",
"(",
"float",
"(",
"milli",
")",
"/",
"1000",
"%",
"60",
")",
".",
"zfill",
"(",
"2",
")",
"minutes",
"=",
"'{0:.0f}'",
".",
"format",
"(",
"(",
"milli",
"/",
"(",
"1000",
"*",
"60",
")",
")",
"%",
"60",
")",
".",
"zfill",
"(",
"2",
")",
"hours",
"=",
"'{0:.0f}'",
".",
"format",
"(",
"(",
"milli",
"/",
"(",
"1000",
"*",
"60",
"*",
"60",
")",
")",
"%",
"24",
")",
".",
"zfill",
"(",
"2",
")",
"self",
".",
"duration",
"=",
"hours",
"+",
"':'",
"+",
"minutes",
"+",
"':'",
"+",
"seconds",
"segment",
".",
"export",
"(",
"self",
".",
"link",
",",
"format",
"=",
"'mp3'",
")",
"self",
".",
"length",
"=",
"os",
".",
"path",
".",
"getsize",
"(",
"self",
".",
"link",
")"
] | python | Synthesize audio from the episode's text. | false |
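The HH:MM:SS formatting from render_audio, isolated as a sketch so it is easy to test. Note the original's zfill(2) never pads the '{0:.1f}' seconds string (it is already at least three characters), so zfill(4) and integer division are used here instead.

```python
def ms_to_duration(milli):
    seconds = '{0:.1f}'.format(milli / 1000 % 60).zfill(4)   # e.g. 03.4
    minutes = '{0:.0f}'.format(milli // (1000 * 60) % 60).zfill(2)
    hours = '{0:.0f}'.format(milli // (1000 * 60 * 60) % 24).zfill(2)
    return hours + ':' + minutes + ':' + seconds

print(ms_to_duration(3723400))  # 01:02:03.4
```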
2,138,525 | def export(self, version, export_dir):
"""
Create prov entities and activities.
"""
atts = [
(PROV['type'], self.type),
(PROV['label'], self.label),
]
if version['num'] == "1.0.0":
atts += [
(NIDM_USER_SPECIFIED_THRESHOLD_TYPE, self.user_threshold_type),
(PROV['value'], self.stat_threshold),
(NIDM_P_VALUE_UNCORRECTED, self.p_uncorr_threshold),
(NIDM_P_VALUE_FWER, self.p_corr_threshold)
]
else:
atts += [
(PROV['type'], self.threshold_type),
(PROV['value'], self.value)
]
if self.equiv_thresh is not None:
for equiv in self.equiv_thresh:
atts += [
(NIDM_EQUIVALENT_THRESHOLD, equiv.id)
]
self.add_attributes([(k, v) for k, v in atts if v is not None]) | [
"def",
"export",
"(",
"self",
",",
"version",
",",
"export_dir",
")",
":",
"atts",
"=",
"[",
"(",
"PROV",
"[",
"'type'",
"]",
",",
"self",
".",
"type",
")",
",",
"(",
"PROV",
"[",
"'label'",
"]",
",",
"self",
".",
"label",
")",
",",
"]",
"if",
"version",
"[",
"'num'",
"]",
"==",
"\"1.0.0\"",
":",
"atts",
"+=",
"[",
"(",
"NIDM_USER_SPECIFIED_THRESHOLD_TYPE",
",",
"self",
".",
"user_threshold_type",
")",
",",
"(",
"PROV",
"[",
"'value'",
"]",
",",
"self",
".",
"stat_threshold",
")",
",",
"(",
"NIDM_P_VALUE_UNCORRECTED",
",",
"self",
".",
"p_uncorr_threshold",
")",
",",
"(",
"NIDM_P_VALUE_FWER",
",",
"self",
".",
"p_corr_threshold",
")",
"]",
"else",
":",
"atts",
"+=",
"[",
"(",
"PROV",
"[",
"'type'",
"]",
",",
"self",
".",
"threshold_type",
")",
",",
"(",
"PROV",
"[",
"'value'",
"]",
",",
"self",
".",
"value",
")",
"]",
"if",
"self",
".",
"equiv_thresh",
"is",
"not",
"None",
":",
"for",
"equiv",
"in",
"self",
".",
"equiv_thresh",
":",
"atts",
"+=",
"[",
"(",
"NIDM_EQUIVALENT_THRESHOLD",
",",
"equiv",
".",
"id",
")",
"]",
"self",
".",
"add_attributes",
"(",
"[",
"(",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"atts",
"if",
"v",
"is",
"not",
"None",
"]",
")"
] | python | Create prov entities and activities. | false |
2,260,589 | def _process_single_run(kwargs):
"""Wrapper function that first configures logging and starts a single run afterwards."""
_configure_niceness(kwargs)
_configure_logging(kwargs)
result_queue = kwargs['result_queue']
result = _sigint_handling_single_run(kwargs)
result_queue.put(result)
result_queue.close() | [
"def",
"_process_single_run",
"(",
"kwargs",
")",
":",
"_configure_niceness",
"(",
"kwargs",
")",
"_configure_logging",
"(",
"kwargs",
")",
"result_queue",
"=",
"kwargs",
"[",
"'result_queue'",
"]",
"result",
"=",
"_sigint_handling_single_run",
"(",
"kwargs",
")",
"result_queue",
".",
"put",
"(",
"result",
")",
"result_queue",
".",
"close",
"(",
")"
] | python | Wrapper function that first configures logging and starts a single run afterwards. | false |
2,498,253 | def basic_consume(self, queue='', consumer_tag='', no_local=False,
no_ack=False, exclusive=False, nowait=False,
callback=None, ticket=None):
"""
start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue.
"""
args = AMQPWriter()
if ticket is not None:
args.write_short(ticket)
else:
args.write_short(self.default_ticket)
args.write_shortstr(queue)
args.write_shortstr(consumer_tag)
args.write_bit(no_local)
args.write_bit(no_ack)
args.write_bit(exclusive)
args.write_bit(nowait)
self._send_method((60, 20), args)
if not nowait:
consumer_tag = self.wait(allowed_methods=[
(60, 21), # Channel.basic_consume_ok
])
self.callbacks[consumer_tag] = callback
return consumer_tag | [
"def",
"basic_consume",
"(",
"self",
",",
"queue",
"=",
"''",
",",
"consumer_tag",
"=",
"''",
",",
"no_local",
"=",
"False",
",",
"no_ack",
"=",
"False",
",",
"exclusive",
"=",
"False",
",",
"nowait",
"=",
"False",
",",
"callback",
"=",
"None",
",",
"ticket",
"=",
"None",
")",
":",
"args",
"=",
"AMQPWriter",
"(",
")",
"if",
"ticket",
"is",
"not",
"None",
":",
"args",
".",
"write_short",
"(",
"ticket",
")",
"else",
":",
"args",
".",
"write_short",
"(",
"self",
".",
"default_ticket",
")",
"args",
".",
"write_shortstr",
"(",
"queue",
")",
"args",
".",
"write_shortstr",
"(",
"consumer_tag",
")",
"args",
".",
"write_bit",
"(",
"no_local",
")",
"args",
".",
"write_bit",
"(",
"no_ack",
")",
"args",
".",
"write_bit",
"(",
"exclusive",
")",
"args",
".",
"write_bit",
"(",
"nowait",
")",
"self",
".",
"_send_method",
"(",
"(",
"60",
",",
"20",
")",
",",
"args",
")",
"if",
"not",
"nowait",
":",
"consumer_tag",
"=",
"self",
".",
"wait",
"(",
"allowed_methods",
"=",
"[",
"(",
"60",
",",
"21",
")",
",",
"]",
")",
"self",
".",
"callbacks",
"[",
"consumer_tag",
"]",
"=",
"callback",
"return",
"consumer_tag"
] | python | start a queue consumer
This method asks the server to start a "consumer", which is a
transient request for messages from a specific queue.
Consumers last as long as the channel they were created on, or
until the client cancels them.
RULE:
The server SHOULD support at least 16 consumers per queue,
unless the queue was declared as private, and ideally,
impose no limit except as defined by available resources.
PARAMETERS:
queue: shortstr
Specifies the name of the queue to consume from. If
the queue name is null, refers to the current queue
for the channel, which is the last declared queue.
RULE:
If the client did not previously declare a queue,
and the queue name in this method is empty, the
server MUST raise a connection exception with
reply code 530 (not allowed).
consumer_tag: shortstr
Specifies the identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
RULE:
The tag MUST NOT refer to an existing consumer. If
the client attempts to create two consumers with
the same non-empty tag the server MUST raise a
connection exception with reply code 530 (not
allowed).
no_local: boolean
do not deliver own messages
If the no-local field is set the server will not send
messages to the client that published them.
no_ack: boolean
no acknowledgement needed
If this field is set the server does not expect
acknowledgments for messages. That is, when a message
is delivered to the client the server automatically and
silently acknowledges it on behalf of the client. This
functionality increases performance but at the cost of
reliability. Messages can get lost if a client dies
before it can deliver them to the application.
exclusive: boolean
request exclusive access
Request exclusive consumer access, meaning only this
consumer can access the queue.
RULE:
If the server cannot grant exclusive access to the
queue when asked, - because there are other
consumers active - it MUST raise a channel
exception with return code 403 (access refused).
nowait: boolean
do not send a reply method
If set, the server will not respond to the method. The
client should not wait for a reply method. If the
server could not complete the method it will raise a
channel or connection exception.
callback: Python callable
function/method called with each delivered message
For each message delivered by the broker, the
callable will be called with a Message object
as the single argument. If no callable is specified,
messages are quietly discarded, no_ack should probably
be set to True in that case.
ticket: short
RULE:
The client MUST provide a valid access ticket
giving "read" access rights to the realm for the
queue. | false |
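A hedged end-to-end usage sketch for the consumer API documented above, in the py-amqplib style the snippet comes from; the import path and a reachable local broker are assumptions. Note that ch.callbacks and ch.wait() are exactly the pieces the method itself populates and dispatches.

```python
from amqplib import client_0_8 as amqp   # classic py-amqplib import path (assumed)

conn = amqp.Connection(host='localhost:5672', userid='guest', password='guest')
ch = conn.channel()
ch.queue_declare(queue='tasks', auto_delete=True)

def on_message(msg):
    print('received:', msg.body)

ch.basic_consume(queue='tasks', callback=on_message, no_ack=True)
while ch.callbacks:      # populated by basic_consume; wait() dispatches one method
    ch.wait()
```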
1,620,580 | def delete_node(self, name, **kwargs):
"""
delete a Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_node(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Node (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_node_with_http_info(name, **kwargs)
else:
(data) = self.delete_node_with_http_info(name, **kwargs)
return data | [
"def",
"delete_node",
"(",
"self",
",",
"name",
",",
"**",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async_req'",
")",
":",
"return",
"self",
".",
"delete_node_with_http_info",
"(",
"name",
",",
"**",
"kwargs",
")",
"else",
":",
"(",
"data",
")",
"=",
"self",
".",
"delete_node_with_http_info",
"(",
"name",
",",
"**",
"kwargs",
")",
"return",
"data"
] | python | delete a Node
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_node(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Node (required)
:param str pretty: If 'true', then the output is pretty printed.
:param V1DeleteOptions body:
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread. | false |
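A hedged usage sketch for the generated Kubernetes client method above; a reachable cluster and a local kubeconfig are assumed, and the call really deletes the Node object.

```python
from kubernetes import client, config

config.load_kube_config()                 # cluster credentials assumed
v1 = client.CoreV1Api()
status = v1.delete_node(name='worker-3', grace_period_seconds=0)
print(status.status)                      # returns a V1Status, per the docstring
```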
2,405,123 | def add_attachment_viewer_widget(self, attachment_property, custom_title=False, height=None):
"""
Add a KE-chain Attachment Viewer (e.g. attachment viewer widget) to the customization.
The widget will be saved to KE-chain.
:param attachment_property: The Attachment Property to which the Viewer will be connected to.
:type attachment_property: :class:`Property` or UUID
:param custom_title: A custom title for the attachment viewer widget
* False (default): Notebook name
* String value: Custom title
* None: No title
:type custom_title: bool or basestring or None
:param height: The height of the Notebook in pixels
:type height: int or None
:raises IllegalArgumentError: When unknown or illegal arguments are passed.
"""
# Check whether the attachment property is uuid type or class `Property`
if isinstance(attachment_property, Property):
attachment_property_id = attachment_property.id
elif isinstance(attachment_property, text_type) and is_uuid(attachment_property):
attachment_property_id = attachment_property
attachment_property = self._client.property(id=attachment_property_id)
else:
raise IllegalArgumentError("When using the add_attachment_viewer_widget, attachment_property must be a "
"Property or Property id. Type is: {}".format(type(attachment_property)))
# Check whether the `Property` has type `Attachment`
property_type = attachment_property.type
if property_type != PropertyType.ATTACHMENT_VALUE:
raise IllegalArgumentError("When using the add_attachment_viewer_widget, attachment_property must have "
"type {}. Type found: {}".format(PropertyType.ATTACHMENT_VALUE, property_type))
# Check also whether `Property` has category `Instance`
property_category = attachment_property._json_data['category']
if property_category != Category.INSTANCE:
raise IllegalArgumentError("When using the add_attachment_viewer_widget, attachment_property must have "
"category {}. Category found: {}".format(Category.INSTANCE, property_category))
# Add custom title
if custom_title is False:
show_title_value = "Default"
title = attachment_property.name
elif custom_title is None:
show_title_value = "No title"
title = ''
else:
show_title_value = "Custom title"
title = str(custom_title)
# Declare attachment viewer widget config
config = {
'propertyId': attachment_property_id,
'showTitleValue': show_title_value,
'xtype': ComponentXType.PROPERTYATTACHMENTPREVIEWER,
'title': title,
'filter': {
'activity_id': str(self.activity.id)
},
'height': height if height else 500
}
# Declare attachment viewer widget meta
meta = {
'propertyInstanceId': attachment_property_id,
'activityId': str(self.activity.id),
'customHeight': height if height else 500,
'showTitleValue': show_title_value,
'customTitle': title
}
self._add_widget(dict(config=config, meta=meta, name=WidgetNames.ATTACHMENTVIEWERWIDGET)) | [
"def",
"add_attachment_viewer_widget",
"(",
"self",
",",
"attachment_property",
",",
"custom_title",
"=",
"False",
",",
"height",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"attachment_property",
",",
"Property",
")",
":",
"attachment_property_id",
"=",
"attachment_property",
".",
"id",
"elif",
"isinstance",
"(",
"attachment_property",
",",
"text_type",
")",
"and",
"is_uuid",
"(",
"attachment_property",
")",
":",
"attachment_property_id",
"=",
"attachment_property",
"attachment_property",
"=",
"self",
".",
"_client",
".",
"property",
"(",
"id",
"=",
"attachment_property_id",
")",
"else",
":",
"raise",
"IllegalArgumentError",
"(",
"\"When using the add_attachment_viewer_widget, attachment_property must be a \"",
"\"Property or Property id. Type is: {}\"",
".",
"format",
"(",
"type",
"(",
"attachment_property",
")",
")",
")",
"property_type",
"=",
"attachment_property",
".",
"type",
"if",
"property_type",
"!=",
"PropertyType",
".",
"ATTACHMENT_VALUE",
":",
"raise",
"IllegalArgumentError",
"(",
"\"When using the add_attachment_viewer_widget, attachment_property must have \"",
"\"type {}. Type found: {}\"",
".",
"format",
"(",
"PropertyType",
".",
"ATTACHMENT_VALUE",
",",
"property_type",
")",
")",
"property_category",
"=",
"attachment_property",
".",
"_json_data",
"[",
"'category'",
"]",
"if",
"property_category",
"!=",
"Category",
".",
"INSTANCE",
":",
"raise",
"IllegalArgumentError",
"(",
"\"When using the add_attachment_viewer_widget, attachment_property must have \"",
"\"category {}. Category found: {}\"",
".",
"format",
"(",
"Category",
".",
"INSTANCE",
",",
"property_category",
")",
")",
"if",
"custom_title",
"is",
"False",
":",
"show_title_value",
"=",
"\"Default\"",
"title",
"=",
"attachment_property",
".",
"name",
"elif",
"custom_title",
"is",
"None",
":",
"show_title_value",
"=",
"\"No title\"",
"title",
"=",
"''",
"else",
":",
"show_title_value",
"=",
"\"Custom title\"",
"title",
"=",
"str",
"(",
"custom_title",
")",
"config",
"=",
"{",
"'propertyId'",
":",
"attachment_property_id",
",",
"'showTitleValue'",
":",
"show_title_value",
",",
"'xtype'",
":",
"ComponentXType",
".",
"PROPERTYATTACHMENTPREVIEWER",
",",
"'title'",
":",
"title",
",",
"'filter'",
":",
"{",
"'activity_id'",
":",
"str",
"(",
"self",
".",
"activity",
".",
"id",
")",
"}",
",",
"'height'",
":",
"height",
"if",
"height",
"else",
"500",
"}",
"meta",
"=",
"{",
"'propertyInstanceId'",
":",
"attachment_property_id",
",",
"'activityId'",
":",
"str",
"(",
"self",
".",
"activity",
".",
"id",
")",
",",
"'customHeight'",
":",
"height",
"if",
"height",
"else",
"500",
",",
"'showTitleValue'",
":",
"show_title_value",
",",
"'customTitle'",
":",
"title",
"}",
"self",
".",
"_add_widget",
"(",
"dict",
"(",
"config",
"=",
"config",
",",
"meta",
"=",
"meta",
",",
"name",
"=",
"WidgetNames",
".",
"ATTACHMENTVIEWERWIDGET",
")",
")"
] | python | Add a KE-chain Attachment Viewer (e.g. attachment viewer widget) to the customization.
The widget will be saved to KE-chain.
:param attachment_property: The Attachment Property to which the Viewer will be connected to.
:type attachment_property: :class:`Property` or UUID
:param custom_title: A custom title for the attachment viewer widget
* False (default): Notebook name
* String value: Custom title
* None: No title
:type custom_title: bool or basestring or None
:param height: The height of the Notebook in pixels
:type height: int or None
:raises IllegalArgumentError: When unknown or illegal arguments are passed. | false |
2,137,424 | def __init__(self,
nlp,
rules: Dict,
extractor_name: str) -> None:
"""
Initialize the extractor, storing the rule information and construct spacy rules
Args:
nlp
rules (Dict): spacy rules
extractor_name: str
Returns:
"""
Extractor.__init__(self,
input_type=InputType.TEXT,
category="spacy_rule_extractor",
name=extractor_name)
self._rules = rules["rules"]
self._nlp = copy.deepcopy(nlp)
self._tokenizer = Tokenizer(self._nlp)
self._matcher = Matcher(self._nlp.vocab)
self._field_name = rules["field_name"] if "field_name" in rules else extractor_name
self._rule_lst = {}
self._hash_map = {}
for idx, a_rule in enumerate(self._rules):
this_rule = Rule(a_rule, self._nlp)
self._rule_lst[this_rule.identifier + "rule_id##" + str(idx)] = this_rule | [
"def",
"__init__",
"(",
"self",
",",
"nlp",
",",
"rules",
":",
"Dict",
",",
"extractor_name",
":",
"str",
")",
"->",
"None",
":",
"Extractor",
".",
"__init__",
"(",
"self",
",",
"input_type",
"=",
"InputType",
".",
"TEXT",
",",
"category",
"=",
"\"spacy_rule_extractor\"",
",",
"name",
"=",
"extractor_name",
")",
"self",
".",
"_rules",
"=",
"rules",
"[",
"\"rules\"",
"]",
"self",
".",
"_nlp",
"=",
"copy",
".",
"deepcopy",
"(",
"nlp",
")",
"self",
".",
"_tokenizer",
"=",
"Tokenizer",
"(",
"self",
".",
"_nlp",
")",
"self",
".",
"_matcher",
"=",
"Matcher",
"(",
"self",
".",
"_nlp",
".",
"vocab",
")",
"self",
".",
"_field_name",
"=",
"rules",
"[",
"\"field_name\"",
"]",
"if",
"\"field_name\"",
"in",
"rules",
"else",
"extractor_name",
"self",
".",
"_rule_lst",
"=",
"{",
"}",
"self",
".",
"_hash_map",
"=",
"{",
"}",
"for",
"idx",
",",
"a_rule",
"in",
"enumerate",
"(",
"self",
".",
"_rules",
")",
":",
"this_rule",
"=",
"Rule",
"(",
"a_rule",
",",
"self",
".",
"_nlp",
")",
"self",
".",
"_rule_lst",
"[",
"this_rule",
".",
"identifier",
"+",
"\"rule_id##\"",
"+",
"str",
"(",
"idx",
")",
"]",
"=",
"this_rule"
] | python | Initialize the extractor, storing the rule information and construct spacy rules
Args:
nlp
rules (Dict): spacy rules
extractor_name: str
Returns: | false |
2,579,317 | def init_host(self):
"""
        Initialize the host
"""
env.host_string = self.host_string
env.user = self.host_user
env.password = self.host_passwd
env.key_filename = self.host_keyfile | [
"def",
"init_host",
"(",
"self",
")",
":",
"env",
".",
"host_string",
"=",
"self",
".",
"host_string",
"env",
".",
"user",
"=",
"self",
".",
"host_user",
"env",
".",
"password",
"=",
"self",
".",
"host_passwd",
"env",
".",
"key_filename",
"=",
"self",
".",
"host_keyfile"
] | python | Initialize the host | false |
1,944,195 | def get_hit_status(self, hitid):
''' Get HIT status '''
hitdata = self.get_hit(hitid)
if not hitdata:
return False
return hitdata['HITStatus'] | [
"def",
"get_hit_status",
"(",
"self",
",",
"hitid",
")",
":",
"hitdata",
"=",
"self",
".",
"get_hit",
"(",
"hitid",
")",
"if",
"not",
"hitdata",
":",
"return",
"False",
"return",
"hitdata",
"[",
"'HITStatus'",
"]"
] | python | Get HIT status | false |
1,893,841 | def get_benchmark_from_name(root, name, extra_params=None):
"""
Create a benchmark from a fully-qualified benchmark name.
Parameters
----------
root : str
Path to the root of a benchmark suite.
name : str
Fully-qualified name to a specific benchmark.
"""
if '-' in name:
try:
name, param_idx = name.split('-', 1)
param_idx = int(param_idx)
except ValueError:
raise ValueError("Benchmark id %r is invalid" % (name,))
else:
param_idx = None
update_sys_path(root)
benchmark = None
# try to directly import benchmark function by guessing its import module
# name
parts = name.split('.')
for i in [1, 2]:
path = os.path.join(root, *parts[:-i]) + '.py'
if not os.path.isfile(path):
continue
modname = '.'.join([os.path.basename(root)] + parts[:-i])
module = import_module(modname)
try:
module_attr = getattr(module, parts[-i])
except AttributeError:
break
if i == 1 and inspect.isfunction(module_attr):
benchmark = _get_benchmark(parts[-i], module, None, module_attr)
break
elif i == 2 and inspect.isclass(module_attr):
try:
class_attr = getattr(module_attr, parts[-1])
except AttributeError:
break
if (inspect.isfunction(class_attr) or
inspect.ismethod(class_attr)):
benchmark = _get_benchmark(parts[-1], module, module_attr,
class_attr)
break
if benchmark is None:
for benchmark in disc_benchmarks(root):
if benchmark.name == name:
break
else:
raise ValueError(
"Could not find benchmark '{0}'".format(name))
if param_idx is not None:
benchmark.set_param_idx(param_idx)
if extra_params:
class ExtraBenchmarkAttrs:
pass
for key, value in extra_params.items():
setattr(ExtraBenchmarkAttrs, key, value)
benchmark._attr_sources.insert(0, ExtraBenchmarkAttrs)
return benchmark | [
"def",
"get_benchmark_from_name",
"(",
"root",
",",
"name",
",",
"extra_params",
"=",
"None",
")",
":",
"if",
"'-'",
"in",
"name",
":",
"try",
":",
"name",
",",
"param_idx",
"=",
"name",
".",
"split",
"(",
"'-'",
",",
"1",
")",
"param_idx",
"=",
"int",
"(",
"param_idx",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Benchmark id %r is invalid\"",
"%",
"(",
"name",
",",
")",
")",
"else",
":",
"param_idx",
"=",
"None",
"update_sys_path",
"(",
"root",
")",
"benchmark",
"=",
"None",
"parts",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"for",
"i",
"in",
"[",
"1",
",",
"2",
"]",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"*",
"parts",
"[",
":",
"-",
"i",
"]",
")",
"+",
"'.py'",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"continue",
"modname",
"=",
"'.'",
".",
"join",
"(",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"root",
")",
"]",
"+",
"parts",
"[",
":",
"-",
"i",
"]",
")",
"module",
"=",
"import_module",
"(",
"modname",
")",
"try",
":",
"module_attr",
"=",
"getattr",
"(",
"module",
",",
"parts",
"[",
"-",
"i",
"]",
")",
"except",
"AttributeError",
":",
"break",
"if",
"i",
"==",
"1",
"and",
"inspect",
".",
"isfunction",
"(",
"module_attr",
")",
":",
"benchmark",
"=",
"_get_benchmark",
"(",
"parts",
"[",
"-",
"i",
"]",
",",
"module",
",",
"None",
",",
"module_attr",
")",
"break",
"elif",
"i",
"==",
"2",
"and",
"inspect",
".",
"isclass",
"(",
"module_attr",
")",
":",
"try",
":",
"class_attr",
"=",
"getattr",
"(",
"module_attr",
",",
"parts",
"[",
"-",
"1",
"]",
")",
"except",
"AttributeError",
":",
"break",
"if",
"(",
"inspect",
".",
"isfunction",
"(",
"class_attr",
")",
"or",
"inspect",
".",
"ismethod",
"(",
"class_attr",
")",
")",
":",
"benchmark",
"=",
"_get_benchmark",
"(",
"parts",
"[",
"-",
"1",
"]",
",",
"module",
",",
"module_attr",
",",
"class_attr",
")",
"break",
"if",
"benchmark",
"is",
"None",
":",
"for",
"benchmark",
"in",
"disc_benchmarks",
"(",
"root",
")",
":",
"if",
"benchmark",
".",
"name",
"==",
"name",
":",
"break",
"else",
":",
"raise",
"ValueError",
"(",
"\"Could not find benchmark '{0}'\"",
".",
"format",
"(",
"name",
")",
")",
"if",
"param_idx",
"is",
"not",
"None",
":",
"benchmark",
".",
"set_param_idx",
"(",
"param_idx",
")",
"if",
"extra_params",
":",
"class",
"ExtraBenchmarkAttrs",
":",
"pass",
"for",
"key",
",",
"value",
"in",
"extra_params",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"ExtraBenchmarkAttrs",
",",
"key",
",",
"value",
")",
"benchmark",
".",
"_attr_sources",
".",
"insert",
"(",
"0",
",",
"ExtraBenchmarkAttrs",
")",
"return",
"benchmark"
] | python | Create a benchmark from a fully-qualified benchmark name.
Parameters
----------
root : str
Path to the root of a benchmark suite.
name : str
Fully-qualified name to a specific benchmark. | false |
1,805,916 | def set_timer(self, num_secs):
"""
Set a timer.
Args:
num_secs(int): Number of seconds
"""
# FIXME / TODO support schemas? Accept timer id number as parameter?
# Dumb heuristic; Query status, pick last device id as that is probably the timer
status = self.status()
devices = status['dps']
devices_numbers = list(devices.keys())
devices_numbers.sort()
dps_id = devices_numbers[-1]
payload = self.generate_payload(SET, {dps_id:num_secs})
data = self._send_receive(payload)
log.debug('set_timer received data=%r', data)
return data | [
"def",
"set_timer",
"(",
"self",
",",
"num_secs",
")",
":",
"status",
"=",
"self",
".",
"status",
"(",
")",
"devices",
"=",
"status",
"[",
"'dps'",
"]",
"devices_numbers",
"=",
"list",
"(",
"devices",
".",
"keys",
"(",
")",
")",
"devices_numbers",
".",
"sort",
"(",
")",
"dps_id",
"=",
"devices_numbers",
"[",
"-",
"1",
"]",
"payload",
"=",
"self",
".",
"generate_payload",
"(",
"SET",
",",
"{",
"dps_id",
":",
"num_secs",
"}",
")",
"data",
"=",
"self",
".",
"_send_receive",
"(",
"payload",
")",
"log",
".",
"debug",
"(",
"'set_timer received data=%r'",
",",
"data",
")",
"return",
"data"
] | python | Set a timer.
Args:
num_secs(int): Number of seconds | false |
2,676,817 | def read(cls, iprot):
'''
Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration
'''
init_kwds = {}
iprot.read_struct_begin()
while True:
ifield_name, ifield_type, _ifield_id = iprot.read_field_begin()
if ifield_type == 0: # STOP
break
elif ifield_name == 'images_per_object':
init_kwds['images_per_object'] = iprot.read_i32()
elif ifield_name == 'objects':
init_kwds['objects'] = iprot.read_i32()
iprot.read_field_end()
iprot.read_struct_end()
return cls(**init_kwds) | [
"def",
"read",
"(",
"cls",
",",
"iprot",
")",
":",
"init_kwds",
"=",
"{",
"}",
"iprot",
".",
"read_struct_begin",
"(",
")",
"while",
"True",
":",
"ifield_name",
",",
"ifield_type",
",",
"_ifield_id",
"=",
"iprot",
".",
"read_field_begin",
"(",
")",
"if",
"ifield_type",
"==",
"0",
":",
"break",
"elif",
"ifield_name",
"==",
"'images_per_object'",
":",
"init_kwds",
"[",
"'images_per_object'",
"]",
"=",
"iprot",
".",
"read_i32",
"(",
")",
"elif",
"ifield_name",
"==",
"'objects'",
":",
"init_kwds",
"[",
"'objects'",
"]",
"=",
"iprot",
".",
"read_i32",
"(",
")",
"iprot",
".",
"read_field_end",
"(",
")",
"iprot",
".",
"read_struct_end",
"(",
")",
"return",
"cls",
"(",
"**",
"init_kwds",
")"
] | python | Read a new object from the given input protocol and return the object.
:type iprot: thryft.protocol._input_protocol._InputProtocol
:rtype: pastpy.gen.database.impl.dummy.dummy_database_configuration.DummyDatabaseConfiguration | false |
2,290,565 | def get_objects_with_object(self, obj_type, *child_types):
"""
:param obj_type: requested object type.
:param child_type: requested child types.
:return: all children of the requested type that have the requested child types.
"""
return [o for o in self.get_objects_by_type(obj_type) if
o.get_objects_by_type(*child_types)] | [
"def",
"get_objects_with_object",
"(",
"self",
",",
"obj_type",
",",
"*",
"child_types",
")",
":",
"return",
"[",
"o",
"for",
"o",
"in",
"self",
".",
"get_objects_by_type",
"(",
"obj_type",
")",
"if",
"o",
".",
"get_objects_by_type",
"(",
"*",
"child_types",
")",
"]"
] | python | :param obj_type: requested object type.
:param child_type: requested child types.
:return: all children of the requested type that have the requested child types. | false |
2,261,648 | def ipi_base_number(name=None):
"""
IPI Base Number field.
An IPI Base Number code written on a field follows the Pattern
C-NNNNNNNNN-M. This being:
- C: header, a character.
- N: numeric value.
- M: control digit.
So, for example, an IPI Base Number code field can contain I-000000229-7.
:param name: name for the field
:return: a parser for the IPI Base Number field
"""
if name is None:
name = 'IPI Base Number Field'
field = pp.Regex('I-[0-9]{9}-[0-9]')
# Name
field.setName(name)
field_num = basic.numeric(13)
field_num.setName(name)
field = field | field_num
# White spaces are not removed
field.leaveWhitespace()
return field.setResultsName('ipi_base_n') | [
"def",
"ipi_base_number",
"(",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"'IPI Base Number Field'",
"field",
"=",
"pp",
".",
"Regex",
"(",
"'I-[0-9]{9}-[0-9]'",
")",
"field",
".",
"setName",
"(",
"name",
")",
"field_num",
"=",
"basic",
".",
"numeric",
"(",
"13",
")",
"field_num",
".",
"setName",
"(",
"name",
")",
"field",
"=",
"field",
"|",
"field_num",
"field",
".",
"leaveWhitespace",
"(",
")",
"return",
"field",
".",
"setResultsName",
"(",
"'ipi_base_n'",
")"
] | python | IPI Base Number field.
An IPI Base Number code written on a field follows the Pattern
C-NNNNNNNNN-M. This being:
- C: header, a character.
- N: numeric value.
- M: control digit.
So, for example, an IPI Base Number code field can contain I-000000229-7.
:param name: name for the field
:return: a parser for the IPI Base Number field | false |
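Exercising the regex branch of the parser above directly with pyparsing; basic.numeric is project-specific, so only the I-NNNNNNNNN-M form is shown.

```python
import pyparsing as pp

field = pp.Regex('I-[0-9]{9}-[0-9]').setResultsName('ipi_base_n')
result = field.parseString('I-000000229-7')
print(result['ipi_base_n'])  # I-000000229-7
```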
2,276,088 | def _on_msg(self, msg):
"""Handle messages from the front-end"""
data = msg['content']['data']
# If the message is a call invoke, run the function and send
# the results.
if 'callback' in data:
guid = data['callback']
callback = callback_registry[guid]
args = data['arguments']
args = [self.deserialize(a) for a in args]
index = data['index']
results = callback(*args)
return self.serialize(self._send('return', index=index, results=results))
# The message is not a call invoke, it must be an object
# that is a response to a Python request.
else:
index = data['index']
immutable = data['immutable']
value = data['value']
if index in self._callbacks:
self._callbacks[index].resolve({
'immutable': immutable,
'value': value
})
del self._callbacks[index] | [
"def",
"_on_msg",
"(",
"self",
",",
"msg",
")",
":",
"data",
"=",
"msg",
"[",
"'content'",
"]",
"[",
"'data'",
"]",
"if",
"'callback'",
"in",
"data",
":",
"guid",
"=",
"data",
"[",
"'callback'",
"]",
"callback",
"=",
"callback_registry",
"[",
"guid",
"]",
"args",
"=",
"data",
"[",
"'arguments'",
"]",
"args",
"=",
"[",
"self",
".",
"deserialize",
"(",
"a",
")",
"for",
"a",
"in",
"args",
"]",
"index",
"=",
"data",
"[",
"'index'",
"]",
"results",
"=",
"callback",
"(",
"*",
"args",
")",
"return",
"self",
".",
"serialize",
"(",
"self",
".",
"_send",
"(",
"'return'",
",",
"index",
"=",
"index",
",",
"results",
"=",
"results",
")",
")",
"else",
":",
"index",
"=",
"data",
"[",
"'index'",
"]",
"immutable",
"=",
"data",
"[",
"'immutable'",
"]",
"value",
"=",
"data",
"[",
"'value'",
"]",
"if",
"index",
"in",
"self",
".",
"_callbacks",
":",
"self",
".",
"_callbacks",
"[",
"index",
"]",
".",
"resolve",
"(",
"{",
"'immutable'",
":",
"immutable",
",",
"'value'",
":",
"value",
"}",
")",
"del",
"self",
".",
"_callbacks",
"[",
"index",
"]"
] | python | Handle messages from the front-end | false |
2,219,368 | def _add_custom_headers(self, dct):
"""
Add the Client-ID header required by Cloud Queues
"""
if self.client_id is None:
self.client_id = os.environ.get("CLOUD_QUEUES_ID")
if self.client_id:
dct["Client-ID"] = self.client_id | [
"def",
"_add_custom_headers",
"(",
"self",
",",
"dct",
")",
":",
"if",
"self",
".",
"client_id",
"is",
"None",
":",
"self",
".",
"client_id",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"CLOUD_QUEUES_ID\"",
")",
"if",
"self",
".",
"client_id",
":",
"dct",
"[",
"\"Client-ID\"",
"]",
"=",
"self",
".",
"client_id"
] | python | Add the Client-ID header required by Cloud Queues | false |
2,529,034 | def remove_rule(self, ip_protocol, from_port, to_port,
src_group_name, src_group_owner_id, cidr_ip):
"""
        Remove a rule from the SecurityGroup object. Note that this method
only changes the local version of the object. No information
is sent to EC2.
"""
target_rule = None
for rule in self.rules:
if rule.ip_protocol == ip_protocol:
if rule.from_port == from_port:
if rule.to_port == to_port:
target_rule = rule
target_grant = None
for grant in rule.grants:
if grant.name == src_group_name:
if grant.owner_id == src_group_owner_id:
if grant.cidr_ip == cidr_ip:
target_grant = grant
if target_grant:
rule.grants.remove(target_grant)
if len(rule.grants) == 0:
self.rules.remove(target_rule) | [
"def",
"remove_rule",
"(",
"self",
",",
"ip_protocol",
",",
"from_port",
",",
"to_port",
",",
"src_group_name",
",",
"src_group_owner_id",
",",
"cidr_ip",
")",
":",
"target_rule",
"=",
"None",
"for",
"rule",
"in",
"self",
".",
"rules",
":",
"if",
"rule",
".",
"ip_protocol",
"==",
"ip_protocol",
":",
"if",
"rule",
".",
"from_port",
"==",
"from_port",
":",
"if",
"rule",
".",
"to_port",
"==",
"to_port",
":",
"target_rule",
"=",
"rule",
"target_grant",
"=",
"None",
"for",
"grant",
"in",
"rule",
".",
"grants",
":",
"if",
"grant",
".",
"name",
"==",
"src_group_name",
":",
"if",
"grant",
".",
"owner_id",
"==",
"src_group_owner_id",
":",
"if",
"grant",
".",
"cidr_ip",
"==",
"cidr_ip",
":",
"target_grant",
"=",
"grant",
"if",
"target_grant",
":",
"rule",
".",
"grants",
".",
"remove",
"(",
"target_grant",
")",
"if",
"len",
"(",
"rule",
".",
"grants",
")",
"==",
"0",
":",
"self",
".",
"rules",
".",
"remove",
"(",
"target_rule",
")"
] | python | Remove a rule from the SecurityGroup object. Note that this method
only changes the local version of the object. No information
is sent to EC2. | false |
2,658,137 | def flush_body(self) -> bool:
"""
发送内容体
"""
if self._body is None:
return False
elif isinstance(self._body, bytes):
self.write(self._body)
return True
return False | [
"def",
"flush_body",
"(",
"self",
")",
"->",
"bool",
":",
"if",
"self",
".",
"_body",
"is",
"None",
":",
"return",
"False",
"elif",
"isinstance",
"(",
"self",
".",
"_body",
",",
"bytes",
")",
":",
"self",
".",
"write",
"(",
"self",
".",
"_body",
")",
"return",
"True",
"return",
"False"
] | python | Send the body content. | false |
2,074,503 | def _check_relation(self, relation):
"""Raise a `ValueError` if `relation` is not allowed among
the possible values.
"""
selection = [val[0] for val in self.selection]
if relation not in selection:
raise ValueError(
("The value '{value}' supplied doesn't match with the possible"
" values '{selection}' for the '{field_name}' field").format(
value=relation,
selection=selection,
field_name=self.name,
))
return relation | [
"def",
"_check_relation",
"(",
"self",
",",
"relation",
")",
":",
"selection",
"=",
"[",
"val",
"[",
"0",
"]",
"for",
"val",
"in",
"self",
".",
"selection",
"]",
"if",
"relation",
"not",
"in",
"selection",
":",
"raise",
"ValueError",
"(",
"(",
"\"The value '{value}' supplied doesn't match with the possible\"",
"\" values '{selection}' for the '{field_name}' field\"",
")",
".",
"format",
"(",
"value",
"=",
"relation",
",",
"selection",
"=",
"selection",
",",
"field_name",
"=",
"self",
".",
"name",
",",
")",
")",
"return",
"relation"
] | python | Raise a `ValueError` if `relation` is not allowed among
the possible values. | false |
1,705,901 | def generate_k(order, secexp, hash_func, data):
'''
order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data
'''
qlen = bit_length(order)
holen = hash_func().digest_size
rolen = (qlen + 7) / 8
bx = number_to_string(secexp, order) + bits2octets(data, order)
# Step B
v = b('\x01') * holen
# Step C
k = b('\x00') * holen
# Step D
k = hmac.new(k, v+b('\x00')+bx, hash_func).digest()
# Step E
v = hmac.new(k, v, hash_func).digest()
# Step F
k = hmac.new(k, v+b('\x01')+bx, hash_func).digest()
# Step G
v = hmac.new(k, v, hash_func).digest()
# Step H
while True:
# Step H1
t = b('')
# Step H2
while len(t) < rolen:
v = hmac.new(k, v, hash_func).digest()
t += v
# Step H3
secret = bits2int(t, qlen)
if secret >= 1 and secret < order:
return secret
k = hmac.new(k, v+b('\x00'), hash_func).digest()
v = hmac.new(k, v, hash_func).digest() | [
"def",
"generate_k",
"(",
"order",
",",
"secexp",
",",
"hash_func",
",",
"data",
")",
":",
"qlen",
"=",
"bit_length",
"(",
"order",
")",
"holen",
"=",
"hash_func",
"(",
")",
".",
"digest_size",
"rolen",
"=",
"(",
"qlen",
"+",
"7",
")",
"/",
"8",
"bx",
"=",
"number_to_string",
"(",
"secexp",
",",
"order",
")",
"+",
"bits2octets",
"(",
"data",
",",
"order",
")",
"v",
"=",
"b",
"(",
"'\\x01'",
")",
"*",
"holen",
"k",
"=",
"b",
"(",
"'\\x00'",
")",
"*",
"holen",
"k",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
"+",
"b",
"(",
"'\\x00'",
")",
"+",
"bx",
",",
"hash_func",
")",
".",
"digest",
"(",
")",
"v",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
",",
"hash_func",
")",
".",
"digest",
"(",
")",
"k",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
"+",
"b",
"(",
"'\\x01'",
")",
"+",
"bx",
",",
"hash_func",
")",
".",
"digest",
"(",
")",
"v",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
",",
"hash_func",
")",
".",
"digest",
"(",
")",
"while",
"True",
":",
"t",
"=",
"b",
"(",
"''",
")",
"while",
"len",
"(",
"t",
")",
"<",
"rolen",
":",
"v",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
",",
"hash_func",
")",
".",
"digest",
"(",
")",
"t",
"+=",
"v",
"secret",
"=",
"bits2int",
"(",
"t",
",",
"qlen",
")",
"if",
"secret",
">=",
"1",
"and",
"secret",
"<",
"order",
":",
"return",
"secret",
"k",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
"+",
"b",
"(",
"'\\x00'",
")",
",",
"hash_func",
")",
".",
"digest",
"(",
")",
"v",
"=",
"hmac",
".",
"new",
"(",
"k",
",",
"v",
",",
"hash_func",
")",
".",
"digest",
"(",
")"
] | python | order - order of the DSA generator used in the signature
secexp - secure exponent (private key) in numeric form
hash_func - reference to the same hash function used for generating hash
data - hash in binary form of the signing data | false |
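A sketch of the bits2int helper the RFC 6979 routine above depends on: read the digest as a big-endian integer and keep only its leftmost qlen bits.

```python
def bits2int(data, qlen):
    x = int.from_bytes(data, 'big')
    vlen = len(data) * 8
    return x >> (vlen - qlen) if vlen > qlen else x

print(bits2int(b'\xff\x00', 4))  # 15: the top four bits of 0xff00
```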
2,345,097 | def total_hits(self, filename=None):
"""
Return the total number of covered statements for the file
`filename`. If `filename` is not given, return the total
number of covered statements for all files.
"""
if filename is not None:
return len(self.hit_statements(filename))
total = 0
for filename in self.files():
total += len(self.hit_statements(filename))
return total | [
"def",
"total_hits",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"if",
"filename",
"is",
"not",
"None",
":",
"return",
"len",
"(",
"self",
".",
"hit_statements",
"(",
"filename",
")",
")",
"total",
"=",
"0",
"for",
"filename",
"in",
"self",
".",
"files",
"(",
")",
":",
"total",
"+=",
"len",
"(",
"self",
".",
"hit_statements",
"(",
"filename",
")",
")",
"return",
"total"
] | python | Return the total number of covered statements for the file
`filename`. If `filename` is not given, return the total
number of covered statements for all files. | false |
2,627,191 | def str2long(s):
"""Convert a string to a long integer."""
if type(s) not in (types.StringType, types.UnicodeType):
raise ValueError('the input must be a string')
l = 0
for i in s:
l <<= 8
l |= ord(i)
return l | [
"def",
"str2long",
"(",
"s",
")",
":",
"if",
"type",
"(",
"s",
")",
"not",
"in",
"(",
"types",
".",
"StringType",
",",
"types",
".",
"UnicodeType",
")",
":",
"raise",
"ValueError",
"(",
"'the input must be a string'",
")",
"l",
"=",
"0",
"for",
"i",
"in",
"s",
":",
"l",
"<<=",
"8",
"l",
"|=",
"ord",
"(",
"i",
")",
"return",
"l"
] | python | Convert a string to a long integer. | false |
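For Python 3 bytes, int.from_bytes replaces the shift-and-or loop above (the original targets Python 2 string types).

```python
def str2long(s: bytes) -> int:
    return int.from_bytes(s, 'big')

assert str2long(b'ab') == 24930  # (ord('a') << 8) | ord('b')
```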
2,325,765 | def activate(self):
"""Activate a Vera scene.
This will call the Vera api to activate a scene.
"""
payload = {
'id': 'lu_action',
'action': 'RunScene',
'serviceId': self.scene_service
}
result = self.vera_request(**payload)
logger.debug("activate: "
"result of vera_request with payload %s: %s",
payload, result.text)
self._active = True | [
"def",
"activate",
"(",
"self",
")",
":",
"payload",
"=",
"{",
"'id'",
":",
"'lu_action'",
",",
"'action'",
":",
"'RunScene'",
",",
"'serviceId'",
":",
"self",
".",
"scene_service",
"}",
"result",
"=",
"self",
".",
"vera_request",
"(",
"**",
"payload",
")",
"logger",
".",
"debug",
"(",
"\"activate: \"",
"\"result of vera_request with payload %s: %s\"",
",",
"payload",
",",
"result",
".",
"text",
")",
"self",
".",
"_active",
"=",
"True"
] | python | Activate a Vera scene.
This will call the Vera api to activate a scene. | false |
2,142,741 | def processHierarchical(self):
"""Main process.for hierarchial segmentation.
Returns
-------
est_idxs : list
List with np.arrays for each layer of segmentation containing
the estimated indeces for the segment boundaries.
est_labels : list
List with np.arrays containing the labels for each layer of the
hierarchical segmentation.
"""
F = self._preprocess()
F = librosa.util.normalize(F, axis=0)
F = librosa.feature.stack_memory(F.T).T
self.config["hier"] = True
est_idxs, est_labels, F = main.scluster_segment(F, self.config, self.in_bound_idxs)
for layer in range(len(est_idxs)):
assert est_idxs[layer][0] == 0 and \
est_idxs[layer][-1] == F.shape[1] - 1
est_idxs[layer], est_labels[layer] = \
self._postprocess(est_idxs[layer], est_labels[layer])
return est_idxs, est_labels | [
"def",
"processHierarchical",
"(",
"self",
")",
":",
"F",
"=",
"self",
".",
"_preprocess",
"(",
")",
"F",
"=",
"librosa",
".",
"util",
".",
"normalize",
"(",
"F",
",",
"axis",
"=",
"0",
")",
"F",
"=",
"librosa",
".",
"feature",
".",
"stack_memory",
"(",
"F",
".",
"T",
")",
".",
"T",
"self",
".",
"config",
"[",
"\"hier\"",
"]",
"=",
"True",
"est_idxs",
",",
"est_labels",
",",
"F",
"=",
"main",
".",
"scluster_segment",
"(",
"F",
",",
"self",
".",
"config",
",",
"self",
".",
"in_bound_idxs",
")",
"for",
"layer",
"in",
"range",
"(",
"len",
"(",
"est_idxs",
")",
")",
":",
"assert",
"est_idxs",
"[",
"layer",
"]",
"[",
"0",
"]",
"==",
"0",
"and",
"est_idxs",
"[",
"layer",
"]",
"[",
"-",
"1",
"]",
"==",
"F",
".",
"shape",
"[",
"1",
"]",
"-",
"1",
"est_idxs",
"[",
"layer",
"]",
",",
"est_labels",
"[",
"layer",
"]",
"=",
"self",
".",
"_postprocess",
"(",
"est_idxs",
"[",
"layer",
"]",
",",
"est_labels",
"[",
"layer",
"]",
")",
"return",
"est_idxs",
",",
"est_labels"
] | python | Main process for hierarchical segmentation.
Returns
-------
est_idxs : list
List with np.arrays for each layer of segmentation containing
the estimated indices for the segment boundaries.
est_labels : list
List with np.arrays containing the labels for each layer of the
hierarchical segmentation. | false |
1,818,447 | def matrix_check(table):
"""
Check input matrix format.
:param table: input matrix
:type table : dict
:return: bool
"""
try:
if len(table.keys()) == 0:
return False
for i in table.keys():
if table.keys() != table[i].keys() or vector_check(
list(table[i].values())) is False:
return False
return True
except Exception:
return False | [
"def",
"matrix_check",
"(",
"table",
")",
":",
"try",
":",
"if",
"len",
"(",
"table",
".",
"keys",
"(",
")",
")",
"==",
"0",
":",
"return",
"False",
"for",
"i",
"in",
"table",
".",
"keys",
"(",
")",
":",
"if",
"table",
".",
"keys",
"(",
")",
"!=",
"table",
"[",
"i",
"]",
".",
"keys",
"(",
")",
"or",
"vector_check",
"(",
"list",
"(",
"table",
"[",
"i",
"]",
".",
"values",
"(",
")",
")",
")",
"is",
"False",
":",
"return",
"False",
"return",
"True",
"except",
"Exception",
":",
"return",
"False"
] | python | Check input matrix format.
:param table: input matrix
:type table : dict
:return: bool | false |
1,636,459 | def _get_binding_info(host_header='', ip_address='*', port=80):
'''
Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com
'''
return ':'.join([ip_address, six.text_type(port),
host_header.replace(' ', '')]) | [
"def",
"_get_binding_info",
"(",
"host_header",
"=",
"''",
",",
"ip_address",
"=",
"'*'",
",",
"port",
"=",
"80",
")",
":",
"return",
"':'",
".",
"join",
"(",
"[",
"ip_address",
",",
"six",
".",
"text_type",
"(",
"port",
")",
",",
"host_header",
".",
"replace",
"(",
"' '",
",",
"''",
")",
"]",
")"
] | python | Combine the host header, IP address, and TCP port into bindingInformation
format. Binding Information specifies information to communicate with a
site. It includes the IP address, the port number, and an optional host
header (usually a host name) to communicate with the site.
Args:
host_header (str): Usually a hostname
ip_address (str): The IP address
port (int): The port
Returns:
str: A properly formatted bindingInformation string (IP:port:hostheader)
eg: 192.168.0.12:80:www.contoso.com | false |
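The join above as a standalone function, with the docstring's own example as a check.

```python
def binding_info(host_header='', ip_address='*', port=80):
    return ':'.join([ip_address, str(port), host_header.replace(' ', '')])

print(binding_info('www.contoso.com', '192.168.0.12', 80))
# 192.168.0.12:80:www.contoso.com
```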
2,632,214 | def get_rpndict_flag(self, rpndict):
""" calculate flag set, the value is True or False,
if rpndict value is not None, flag is True, or False
if set with only one item, i.e. True returns,
means values of rpndict are all valid float numbers,
then finally return True, or False
"""
flag_set = set([rpn.Rpn.solve_rpn(str(v)) is not None for v in rpndict.values()])
if len(flag_set) == 1 and flag_set.pop():
return True
else:
return False | [
"def",
"get_rpndict_flag",
"(",
"self",
",",
"rpndict",
")",
":",
"flag_set",
"=",
"set",
"(",
"[",
"rpn",
".",
"Rpn",
".",
"solve_rpn",
"(",
"str",
"(",
"v",
")",
")",
"is",
"not",
"None",
"for",
"v",
"in",
"rpndict",
".",
"values",
"(",
")",
"]",
")",
"if",
"len",
"(",
"flag_set",
")",
"==",
"1",
"and",
"flag_set",
".",
"pop",
"(",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | python | Build a set of flags, one per value of rpndict: the flag is
True if the value parses as a valid RPN expression (the result
is not None), otherwise False. If the resulting set contains
only the single item True, every value of rpndict is a valid
float number, so return True; otherwise return False. | false
1,607,040 | def copy_remote_directory_to_local(sftp, remote_path, local_path):
'''copy remote directory to local machine'''
try:
os.makedirs(local_path, exist_ok=True)
files = sftp.listdir(remote_path)
for file in files:
remote_full_path = os.path.join(remote_path, file)
local_full_path = os.path.join(local_path, file)
try:
if sftp.listdir(remote_full_path):
copy_remote_directory_to_local(sftp, remote_full_path, local_full_path)
except:
sftp.get(remote_full_path, local_full_path)
except Exception:
pass | [
"def",
"copy_remote_directory_to_local",
"(",
"sftp",
",",
"remote_path",
",",
"local_path",
")",
":",
"try",
":",
"os",
".",
"makedirs",
"(",
"local_path",
",",
"exist_ok",
"=",
"True",
")",
"files",
"=",
"sftp",
".",
"listdir",
"(",
"remote_path",
")",
"for",
"file",
"in",
"files",
":",
"remote_full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"remote_path",
",",
"file",
")",
"local_full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"local_path",
",",
"file",
")",
"try",
":",
"if",
"sftp",
".",
"listdir",
"(",
"remote_full_path",
")",
":",
"copy_remote_directory_to_local",
"(",
"sftp",
",",
"remote_full_path",
",",
"local_full_path",
")",
"except",
":",
"sftp",
".",
"get",
"(",
"remote_full_path",
",",
"local_full_path",
")",
"except",
"Exception",
":",
"pass"
] | python | copy remote directory to local machine | false |
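
A hedged sketch of driving this with a paramiko SFTP session; the host, credentials, and paths below are placeholders:

    import paramiko  # assumed dependency; any client exposing listdir()/get() works

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('example.com', username='user', password='secret')  # placeholders
    sftp = ssh.open_sftp()
    copy_remote_directory_to_local(sftp, '/remote/data', '/tmp/data')
    sftp.close()
    ssh.close()
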
2,553,178 | def expand_recurring(number, repeat=5):
"""
Expands a recurring pattern within a number.
Args:
number(tuple): the number to process in the form:
(int, int, int, ... ".", ... , int int int)
repeat: the number of times to expand the pattern.
Returns:
The original number with recurring pattern expanded.
Example:
>>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3)
(1, '.', 0, 9, 9, 9, 9)
"""
if "[" in number:
pattern_index = number.index("[")
pattern = number[pattern_index + 1:-1]
number = number[:pattern_index]
number = number + pattern * (repeat + 1)
return number | [
"def",
"expand_recurring",
"(",
"number",
",",
"repeat",
"=",
"5",
")",
":",
"if",
"\"[\"",
"in",
"number",
":",
"pattern_index",
"=",
"number",
".",
"index",
"(",
"\"[\"",
")",
"pattern",
"=",
"number",
"[",
"pattern_index",
"+",
"1",
":",
"-",
"1",
"]",
"number",
"=",
"number",
"[",
":",
"pattern_index",
"]",
"number",
"=",
"number",
"+",
"pattern",
"*",
"(",
"repeat",
"+",
"1",
")",
"return",
"number"
] | python | Expands a recurring pattern within a number.
Args:
number(tuple): the number to process in the form:
(int, int, int, ... ".", ... , int int int)
repeat: the number of times to expand the pattern.
Returns:
The original number with recurring pattern expanded.
Example:
>>> expand_recurring((1, ".", 0, "[", 9, "]"), repeat=3)
(1, '.', 0, 9, 9, 9, 9) | false |
2,655,424 | def __init__(self, token=None, case_sensitive=True, *args, **kwargs):
"""
:param token: Static value to check whether it is contained in the string
:param case_sensitive: Boolean to check the string matching case or not
"""
super(StringNotContaining, self).__init__(*args, **kwargs)
self.token = token
self.case_sensitive = case_sensitive
self.message_values.update({'token': self.token}) | [
"def",
"__init__",
"(",
"self",
",",
"token",
"=",
"None",
",",
"case_sensitive",
"=",
"True",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"super",
"(",
"StringNotContaining",
",",
"self",
")",
".",
"__init__",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"self",
".",
"token",
"=",
"token",
"self",
".",
"case_sensitive",
"=",
"case_sensitive",
"self",
".",
"message_values",
".",
"update",
"(",
"{",
"'token'",
":",
"self",
".",
"token",
"}",
")"
] | python | :param token: Static value to check whether it is contained in the string
:param case_sensitive: Boolean to check the string matching case or not | false |
1,866,552 | def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader,
use_default_map=True):
"""Create a header_factory that works with the Policy API.
base_class is the class that will be the last class in the created
header class's __bases__ list. default_class is the class that will be
used if "name" (see __call__) does not appear in the registry.
use_default_map controls whether or not the default mapping of names to
specialized classes is copied in to the registry when the factory is
created. The default is True.
"""
self.registry = {}
self.base_class = base_class
self.default_class = default_class
if use_default_map:
self.registry.update(_default_header_map) | [
"def",
"__init__",
"(",
"self",
",",
"base_class",
"=",
"BaseHeader",
",",
"default_class",
"=",
"UnstructuredHeader",
",",
"use_default_map",
"=",
"True",
")",
":",
"self",
".",
"registry",
"=",
"{",
"}",
"self",
".",
"base_class",
"=",
"base_class",
"self",
".",
"default_class",
"=",
"default_class",
"if",
"use_default_map",
":",
"self",
".",
"registry",
".",
"update",
"(",
"_default_header_map",
")"
] | python | Create a header_factory that works with the Policy API.
base_class is the class that will be the last class in the created
header class's __bases__ list. default_class is the class that will be
used if "name" (see __call__) does not appear in the registry.
use_default_map controls whether or not the default mapping of names to
specialized classes is copied in to the registry when the factory is
created. The default is True. | false |
2,564,264 | def paramsReport(self):
"""See docs for `Model` abstract base class."""
report = self._models[0].paramsReport
del report[self.distributedparam]
for param in self.distributionparams:
new_name = "_".join([param.split("_")[0], self.distributedparam])
report[new_name] = getattr(self, param)
return report | [
"def",
"paramsReport",
"(",
"self",
")",
":",
"report",
"=",
"self",
".",
"_models",
"[",
"0",
"]",
".",
"paramsReport",
"del",
"report",
"[",
"self",
".",
"distributedparam",
"]",
"for",
"param",
"in",
"self",
".",
"distributionparams",
":",
"new_name",
"=",
"\"_\"",
".",
"join",
"(",
"[",
"param",
".",
"split",
"(",
"\"_\"",
")",
"[",
"0",
"]",
",",
"self",
".",
"distributedparam",
"]",
")",
"report",
"[",
"new_name",
"]",
"=",
"getattr",
"(",
"self",
",",
"param",
")",
"return",
"report"
] | python | See docs for `Model` abstract base class. | false |
2,206,877 | def find_for_event(cls, event, include_hidden=False, **kwargs):
"""Returns a Query that retrieves the chatrooms for an event
:param event: an indico event (with a numeric ID)
:param include_hidden: if hidden chatrooms should be included, too
:param kwargs: extra kwargs to pass to ``find()``
"""
query = cls.find(event_id=event.id, **kwargs)
if not include_hidden:
query = query.filter(~cls.hidden)
return query | [
"def",
"find_for_event",
"(",
"cls",
",",
"event",
",",
"include_hidden",
"=",
"False",
",",
"**",
"kwargs",
")",
":",
"query",
"=",
"cls",
".",
"find",
"(",
"event_id",
"=",
"event",
".",
"id",
",",
"**",
"kwargs",
")",
"if",
"not",
"include_hidden",
":",
"query",
"=",
"query",
".",
"filter",
"(",
"~",
"cls",
".",
"hidden",
")",
"return",
"query"
] | python | Returns a Query that retrieves the chatrooms for an event
:param event: an indico event (with a numeric ID)
:param include_hidden: if hidden chatrooms should be included, too
:param kwargs: extra kwargs to pass to ``find()`` | false |
2,213,641 | def handle_request(self, connection, msg):
"""Dispatch a request message to the appropriate method.
Parameters
----------
connection : ClientConnection object
The client connection the message was from.
msg : Message object
The request message to process.
Returns
-------
done_future : Future or None
Returns Future for async request handlers that will resolve when
done, or None for sync request handlers once they have completed.
"""
send_reply = True
# TODO Should check presence of Message-ids against protocol flags and
# raise an error as needed.
if msg.name in self._request_handlers:
req_conn = ClientRequestConnection(connection, msg)
handler = self._request_handlers[msg.name]
try:
reply = handler(self, req_conn, msg)
# If we get a future, assume this is an async message handler
# that will resolve the future with the reply message when it
# is complete. Attach a message-sending callback to the future,
# and return the future.
if gen.is_future(reply):
concurrent = getattr(handler, '_concurrent_reply', False)
concurrent_str = ' CONCURRENT' if concurrent else ''
done_future = Future()
def async_reply(f):
try:
connection.reply(f.result(), msg)
self._logger.debug("%s FUTURE%s replied",
msg.name, concurrent_str)
except FailReply, e:
reason = str(e)
self._logger.error("Request %s FUTURE%s FAIL: %s",
msg.name, concurrent_str, reason)
reply = Message.reply(msg.name, "fail", reason)
connection.reply(reply, msg)
except AsyncReply:
self._logger.debug("%s FUTURE ASYNC OK"
% (msg.name,))
except Exception:
error_reply = self.create_exception_reply_and_log(
msg, sys.exc_info())
connection.reply(error_reply, msg)
finally:
done_future.set_result(None)
# TODO When using the return_reply() decorator the future
# returned is not currently threadsafe, must either deal
# with it here, or in kattypes.py. Would be nice if we don't
# have to always fall back to adding a callback, or wrapping
# a thread-safe future. Supporting sync-with-thread and
# async futures is turning out to be a pain in the ass ;)
self.ioloop.add_callback(reply.add_done_callback, async_reply)
# reply.add_done_callback(async_reply)
if concurrent:
# Return immediately if this is a concurrent handler
self._logger.debug("%s FUTURE CONCURRENT OK", msg.name)
return
else:
self._logger.debug("%s FUTURE OK", msg.name)
return done_future
else:
assert (reply.mtype == Message.REPLY)
assert (reply.name == msg.name)
self._logger.debug("%s OK" % (msg.name,))
except AsyncReply, e:
self._logger.debug("%s ASYNC OK" % (msg.name,))
send_reply = False
except FailReply, e:
reason = str(e)
self._logger.error("Request %s FAIL: %s" % (msg.name, reason))
reply = Message.reply(msg.name, "fail", reason)
except Exception:
reply = self.create_exception_reply_and_log(msg, sys.exc_info())
else:
self._logger.error("%s INVALID: Unknown request." % (msg.name,))
reply = Message.reply(msg.name, "invalid", "Unknown request.")
if send_reply:
connection.reply(reply, msg) | [
"def",
"handle_request",
"(",
"self",
",",
"connection",
",",
"msg",
")",
":",
"send_reply",
"=",
"True",
"if",
"msg",
".",
"name",
"in",
"self",
".",
"_request_handlers",
":",
"req_conn",
"=",
"ClientRequestConnection",
"(",
"connection",
",",
"msg",
")",
"handler",
"=",
"self",
".",
"_request_handlers",
"[",
"msg",
".",
"name",
"]",
"try",
":",
"reply",
"=",
"handler",
"(",
"self",
",",
"req_conn",
",",
"msg",
")",
"if",
"gen",
".",
"is_future",
"(",
"reply",
")",
":",
"concurrent",
"=",
"getattr",
"(",
"handler",
",",
"'_concurrent_reply'",
",",
"False",
")",
"concurrent_str",
"=",
"' CONCURRENT'",
"if",
"concurrent",
"else",
"''",
"done_future",
"=",
"Future",
"(",
")",
"def",
"async_reply",
"(",
"f",
")",
":",
"try",
":",
"connection",
".",
"reply",
"(",
"f",
".",
"result",
"(",
")",
",",
"msg",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"%s FUTURE%s replied\"",
",",
"msg",
".",
"name",
",",
"concurrent_str",
")",
"except",
"FailReply",
",",
"e",
":",
"reason",
"=",
"str",
"(",
"e",
")",
"self",
".",
"_logger",
".",
"error",
"(",
"\"Request %s FUTURE%s FAIL: %s\"",
",",
"msg",
".",
"name",
",",
"concurrent_str",
",",
"reason",
")",
"reply",
"=",
"Message",
".",
"reply",
"(",
"msg",
".",
"name",
",",
"\"fail\"",
",",
"reason",
")",
"connection",
".",
"reply",
"(",
"reply",
",",
"msg",
")",
"except",
"AsyncReply",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"%s FUTURE ASYNC OK\"",
"%",
"(",
"msg",
".",
"name",
",",
")",
")",
"except",
"Exception",
":",
"error_reply",
"=",
"self",
".",
"create_exception_reply_and_log",
"(",
"msg",
",",
"sys",
".",
"exc_info",
"(",
")",
")",
"connection",
".",
"reply",
"(",
"error_reply",
",",
"msg",
")",
"finally",
":",
"done_future",
".",
"set_result",
"(",
"None",
")",
"self",
".",
"ioloop",
".",
"add_callback",
"(",
"reply",
".",
"add_done_callback",
",",
"async_reply",
")",
"if",
"concurrent",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"%s FUTURE CONCURRENT OK\"",
",",
"msg",
".",
"name",
")",
"return",
"else",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"%s FUTURE OK\"",
",",
"msg",
".",
"name",
")",
"return",
"done_future",
"else",
":",
"assert",
"(",
"reply",
".",
"mtype",
"==",
"Message",
".",
"REPLY",
")",
"assert",
"(",
"reply",
".",
"name",
"==",
"msg",
".",
"name",
")",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"%s OK\"",
"%",
"(",
"msg",
".",
"name",
",",
")",
")",
"except",
"AsyncReply",
",",
"e",
":",
"self",
".",
"_logger",
".",
"debug",
"(",
"\"%s ASYNC OK\"",
"%",
"(",
"msg",
".",
"name",
",",
")",
")",
"send_reply",
"=",
"False",
"except",
"FailReply",
",",
"e",
":",
"reason",
"=",
"str",
"(",
"e",
")",
"self",
".",
"_logger",
".",
"error",
"(",
"\"Request %s FAIL: %s\"",
"%",
"(",
"msg",
".",
"name",
",",
"reason",
")",
")",
"reply",
"=",
"Message",
".",
"reply",
"(",
"msg",
".",
"name",
",",
"\"fail\"",
",",
"reason",
")",
"except",
"Exception",
":",
"reply",
"=",
"self",
".",
"create_exception_reply_and_log",
"(",
"msg",
",",
"sys",
".",
"exc_info",
"(",
")",
")",
"else",
":",
"self",
".",
"_logger",
".",
"error",
"(",
"\"%s INVALID: Unknown request.\"",
"%",
"(",
"msg",
".",
"name",
",",
")",
")",
"reply",
"=",
"Message",
".",
"reply",
"(",
"msg",
".",
"name",
",",
"\"invalid\"",
",",
"\"Unknown request.\"",
")",
"if",
"send_reply",
":",
"connection",
".",
"reply",
"(",
"reply",
",",
"msg",
")"
] | python | Dispatch a request message to the appropriate method.
Parameters
----------
connection : ClientConnection object
The client connection the message was from.
msg : Message object
The request message to process.
Returns
-------
done_future : Future or None
Returns Future for async request handlers that will resolve when
done, or None for sync request handlers once they have completed. | false |
1,832,445 | def copydb(self, sourcedb, destslab, destdbname=None, progresscb=None):
'''
Copy an entire database in this slab to a new database, potentially in another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method raises an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab.
'''
destdb = destslab.initdb(destdbname, sourcedb.dupsort)
statdict = destslab.stat(db=destdb)
if statdict['entries'] > 0:
raise s_exc.DataAlreadyExists()
rowcount = 0
for chunk in s_common.chunks(self.scanByFull(db=sourcedb), COPY_CHUNKSIZE):
ccount, acount = destslab.putmulti(chunk, dupdata=True, append=True, db=destdb)
if ccount != len(chunk) or acount != len(chunk):
raise s_exc.BadCoreStore(mesg='Unexpected number of values written') # pragma: no cover
rowcount += len(chunk)
if progresscb is not None and 0 == (rowcount % PROGRESS_PERIOD):
progresscb(rowcount)
return rowcount | [
"def",
"copydb",
"(",
"self",
",",
"sourcedb",
",",
"destslab",
",",
"destdbname",
"=",
"None",
",",
"progresscb",
"=",
"None",
")",
":",
"destdb",
"=",
"destslab",
".",
"initdb",
"(",
"destdbname",
",",
"sourcedb",
".",
"dupsort",
")",
"statdict",
"=",
"destslab",
".",
"stat",
"(",
"db",
"=",
"destdb",
")",
"if",
"statdict",
"[",
"'entries'",
"]",
">",
"0",
":",
"raise",
"s_exc",
".",
"DataAlreadyExists",
"(",
")",
"rowcount",
"=",
"0",
"for",
"chunk",
"in",
"s_common",
".",
"chunks",
"(",
"self",
".",
"scanByFull",
"(",
"db",
"=",
"sourcedb",
")",
",",
"COPY_CHUNKSIZE",
")",
":",
"ccount",
",",
"acount",
"=",
"destslab",
".",
"putmulti",
"(",
"chunk",
",",
"dupdata",
"=",
"True",
",",
"append",
"=",
"True",
",",
"db",
"=",
"destdb",
")",
"if",
"ccount",
"!=",
"len",
"(",
"chunk",
")",
"or",
"acount",
"!=",
"len",
"(",
"chunk",
")",
":",
"raise",
"s_exc",
".",
"BadCoreStore",
"(",
"mesg",
"=",
"'Unexpected number of values written'",
")",
"rowcount",
"+=",
"len",
"(",
"chunk",
")",
"if",
"progresscb",
"is",
"not",
"None",
"and",
"0",
"==",
"(",
"rowcount",
"%",
"PROGRESS_PERIOD",
")",
":",
"progresscb",
"(",
"rowcount",
")",
"return",
"rowcount"
] | python | Copy an entire database in this slab to a new database, potentially in another slab.
Args:
sourcedb (LmdbDatabase): which database in this slab to copy rows from
destslab (LmdbSlab): which slab to copy rows to
destdbname (str): the name of the database to copy rows to in destslab
progresscb (Callable[int]): if not None, this function will be periodically called with the number of rows
completed
Returns:
(int): the number of rows copied
Note:
If any rows already exist in the target database, this method raises an error. This means that one cannot
use destdbname=None unless there are no explicit databases in the destination slab. | false |
2,379,425 | def create_log_stream(awsclient, log_group_name, log_stream_name):
"""Creates a log stream for the specified log group.
:param log_group_name: log group name
:param log_stream_name: log stream name
:return:
"""
client_logs = awsclient.get_client('logs')
response = client_logs.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
) | [
"def",
"create_log_stream",
"(",
"awsclient",
",",
"log_group_name",
",",
"log_stream_name",
")",
":",
"client_logs",
"=",
"awsclient",
".",
"get_client",
"(",
"'logs'",
")",
"response",
"=",
"client_logs",
".",
"create_log_stream",
"(",
"logGroupName",
"=",
"log_group_name",
",",
"logStreamName",
"=",
"log_stream_name",
")"
] | python | Creates a log stream for the specified log group.
:param log_group_name: log group name
:param log_stream_name: log stream name
:return: | false |
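
The awsclient argument only needs a get_client method; a minimal, hypothetical wrapper around boto3 is enough to exercise the function:

    import boto3  # assumed available and configured with AWS credentials

    class AwsClient:  # hypothetical wrapper matching the get_client() interface
        def get_client(self, service):
            return boto3.client(service)

    create_log_stream(AwsClient(), 'my-log-group', 'my-log-stream')  # placeholder names
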
1,906,957 | def leaky_relu(x, name=None):
"""Creates a leaky_relu.
This is an alternate non-linearity to relu. The leaky part of the relu may
prevent dead Neurons in a model since the gradient doesn't go completely to
0.
Args:
x: The input tensor.
name: Optional name for this op.
Returns:
x if x > 0 otherwise 0.01 * x.
"""
with tf.name_scope(name, 'leaky_relu', [x]) as scope:
x = tf.convert_to_tensor(x, name='x')
return tf.where(tf.less(x, 0.0), 0.01 * x, x, name=scope) | [
"def",
"leaky_relu",
"(",
"x",
",",
"name",
"=",
"None",
")",
":",
"with",
"tf",
".",
"name_scope",
"(",
"name",
",",
"'leaky_relu'",
",",
"[",
"x",
"]",
")",
"as",
"scope",
":",
"x",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"x",
",",
"name",
"=",
"'x'",
")",
"return",
"tf",
".",
"where",
"(",
"tf",
".",
"less",
"(",
"x",
",",
"0.0",
")",
",",
"0.01",
"*",
"x",
",",
"x",
",",
"name",
"=",
"scope",
")"
] | python | Creates a leaky_relu.
This is an alternate non-linearity to relu. The leaky part of the relu may
prevent dead Neurons in a model since the gradient doesn't go completely to
0.
Args:
x: The input tensor.
name: Optional name for this op.
Returns:
x if x > 0 otherwise 0.01 * x. | false |
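
This is TensorFlow 1.x graph-mode code (three-argument tf.name_scope, tf.Session); a minimal check, assuming TF 1.x is installed:

    import tensorflow as tf  # assumes TensorFlow 1.x

    x = tf.constant([-2.0, 0.5, 3.0])
    y = leaky_relu(x)
    with tf.Session() as sess:
        print(sess.run(y))  # -> [-0.02  0.5   3.  ]
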
2,575,673 | def reverse_transform(self, tables, table_metas=None, missing=None):
"""Transform data back to its original format.
Args:
tables(dict): mapping of table names to `tuple` where each tuple is on the form
(`pandas.DataFrame`, `dict`). The `DataFrame` contains the transformed
data and the `dict` the corresponding meta information.
If not specified, the tables will be retrieved using the meta_file.
table_metas(dict): Full metadata file for the dataset.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data).
"""
if missing is None:
missing = self.missing
else:
self.missing = missing
warnings.warn(DEPRECATION_MESSAGE.format('reverse_transform'), DeprecationWarning)
reverse = {}
for table_name in tables:
table = tables[table_name]
if table_metas is None:
table_meta = self.table_dict[table_name][1]
else:
table_meta = table_metas[table_name]
reverse[table_name] = self.reverse_transform_table(table, table_meta)
return reverse | [
"def",
"reverse_transform",
"(",
"self",
",",
"tables",
",",
"table_metas",
"=",
"None",
",",
"missing",
"=",
"None",
")",
":",
"if",
"missing",
"is",
"None",
":",
"missing",
"=",
"self",
".",
"missing",
"else",
":",
"self",
".",
"missing",
"=",
"missing",
"warnings",
".",
"warn",
"(",
"DEPRECATION_MESSAGE",
".",
"format",
"(",
"'reverse_transform'",
")",
",",
"DeprecationWarning",
")",
"reverse",
"=",
"{",
"}",
"for",
"table_name",
"in",
"tables",
":",
"table",
"=",
"tables",
"[",
"table_name",
"]",
"if",
"table_metas",
"is",
"None",
":",
"table_meta",
"=",
"self",
".",
"table_dict",
"[",
"table_name",
"]",
"[",
"1",
"]",
"else",
":",
"table_meta",
"=",
"table_metas",
"[",
"table_name",
"]",
"reverse",
"[",
"table_name",
"]",
"=",
"self",
".",
"reverse_transform_table",
"(",
"table",
",",
"table_meta",
")",
"return",
"reverse"
] | python | Transform data back to its original format.
Args:
tables(dict): mapping of table names to `tuple` where each tuple is on the form
(`pandas.DataFrame`, `dict`). The `DataFrame` contains the transformed
data and the `dict` the corresponding meta information.
If not specified, the tables will be retrieved using the meta_file.
table_metas(dict): Full metadata file for the dataset.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
dict: Map from `str` (table_names) to `pandas.DataFrame` (transformed data). | false |
2,061,415 | def findSettingsModule():
"Find the settings module dot path within django's manage.py file"
try:
with open('manage.py', 'r') as manage:
manage_contents = manage.read()
search = re.search(
r"([\"\'](?P<module>[a-z\.]+)[\"\'])", manage_contents
)
if search: # django version < 1.7
settings_mod = search.group("module")
else:
# in 1.7, manage.py settings declaration looks like:
# os.environ.setdefault(
# "DJANGO_SETTINGS_MODULE", "example_app.settings"
# )
search = re.search(
"\".*?\"(,\\s)??\"(?P<module>.*?)\"\\)$",
manage_contents, re.I | re.S | re.M
)
settings_mod = search.group("module")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_mod)
except IOError as e:
msg = (
str(e) + '\nPlease ensure that you are in the same directory '
'as django\'s "manage.py" file.'
)
raise IOError(chalk.red(msg), None, sys.exc_info()[2])
except AttributeError:
settings_mod = ''
return settings_mod | [
"def",
"findSettingsModule",
"(",
")",
":",
"try",
":",
"with",
"open",
"(",
"'manage.py'",
",",
"'r'",
")",
"as",
"manage",
":",
"manage_contents",
"=",
"manage",
".",
"read",
"(",
")",
"search",
"=",
"re",
".",
"search",
"(",
"r\"([\\\"\\'](?P<module>[a-z\\.]+)[\\\"\\'])\"",
",",
"manage_contents",
")",
"if",
"search",
":",
"settings_mod",
"=",
"search",
".",
"group",
"(",
"\"module\"",
")",
"else",
":",
"search",
"=",
"re",
".",
"search",
"(",
"\"\\\".*?\\\"(,\\\\s)??\\\"(?P<module>.*?)\\\"\\\\)$\"",
",",
"manage_contents",
",",
"re",
".",
"I",
"|",
"re",
".",
"S",
"|",
"re",
".",
"M",
")",
"settings_mod",
"=",
"search",
".",
"group",
"(",
"\"module\"",
")",
"os",
".",
"environ",
".",
"setdefault",
"(",
"'DJANGO_SETTINGS_MODULE'",
",",
"settings_mod",
")",
"except",
"IOError",
"as",
"e",
":",
"msg",
"=",
"(",
"str",
"(",
"e",
")",
"+",
"'\\nPlease ensure that you are in the same directory '",
"'as django\\'s \"manage.py\" file.'",
")",
"raise",
"IOError",
"(",
"chalk",
".",
"red",
"(",
"msg",
")",
",",
"None",
",",
"sys",
".",
"exc_info",
"(",
")",
"[",
"2",
"]",
")",
"except",
"AttributeError",
":",
"settings_mod",
"=",
"''",
"return",
"settings_mod"
] | python | Find the settings module dot path within django's manage.py file | false |
1,977,467 | def get_backend_init_list(backend_vals):
"""Turn backend config dict into command line items."""
cmd_list = []
for (key, val) in backend_vals.items():
cmd_list.append('-backend-config')
cmd_list.append(key + '=' + val)
return cmd_list | [
"def",
"get_backend_init_list",
"(",
"backend_vals",
")",
":",
"cmd_list",
"=",
"[",
"]",
"for",
"(",
"key",
",",
"val",
")",
"in",
"backend_vals",
".",
"items",
"(",
")",
":",
"cmd_list",
".",
"append",
"(",
"'-backend-config'",
")",
"cmd_list",
".",
"append",
"(",
"key",
"+",
"'='",
"+",
"val",
")",
"return",
"cmd_list"
] | python | Turn backend config dict into command line items. | false |
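
A worked example; note that item order follows the dict's iteration order:

    args = get_backend_init_list({'bucket': 'my-tf-state', 'key': 'prod.tfstate'})
    print(args)
    # -> ['-backend-config', 'bucket=my-tf-state',
    #     '-backend-config', 'key=prod.tfstate']
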
2,172,742 | def setup_job(manager, job_id, tool_id, tool_version, use_metadata=False):
""" Setup new job from these inputs and return dict summarizing state
(used to configure command line).
"""
job_id = manager.setup_job(job_id, tool_id, tool_version)
if use_metadata:
manager.enable_metadata_directory(job_id)
return build_job_config(
job_id=job_id,
job_directory=manager.job_directory(job_id),
system_properties=manager.system_properties(),
tool_id=tool_id,
tool_version=tool_version
) | [
"def",
"setup_job",
"(",
"manager",
",",
"job_id",
",",
"tool_id",
",",
"tool_version",
",",
"use_metadata",
"=",
"False",
")",
":",
"job_id",
"=",
"manager",
".",
"setup_job",
"(",
"job_id",
",",
"tool_id",
",",
"tool_version",
")",
"if",
"use_metadata",
":",
"manager",
".",
"enable_metadata_directory",
"(",
"job_id",
")",
"return",
"build_job_config",
"(",
"job_id",
"=",
"job_id",
",",
"job_directory",
"=",
"manager",
".",
"job_directory",
"(",
"job_id",
")",
",",
"system_properties",
"=",
"manager",
".",
"system_properties",
"(",
")",
",",
"tool_id",
"=",
"tool_id",
",",
"tool_version",
"=",
"tool_version",
")"
] | python | Set up a new job from these inputs and return a dict summarizing state
(used to configure command line). | false |
1,598,009 | def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes)) | [
"def",
"_proc_gnusparse_00",
"(",
"self",
",",
"next",
",",
"pax_headers",
",",
"buf",
")",
":",
"offsets",
"=",
"[",
"]",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"br\"\\d+ GNU.sparse.offset=(\\d+)\\n\"",
",",
"buf",
")",
":",
"offsets",
".",
"append",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"numbytes",
"=",
"[",
"]",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"br\"\\d+ GNU.sparse.numbytes=(\\d+)\\n\"",
",",
"buf",
")",
":",
"numbytes",
".",
"append",
"(",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"next",
".",
"sparse",
"=",
"list",
"(",
"zip",
"(",
"offsets",
",",
"numbytes",
")",
")"
] | python | Process a GNU tar extended sparse header, version 0.0. | false |
1,780,918 | def visit_Operation(self, expression, *operands):
""" constant folding, if all operands of an expression are a Constant do the math """
operation = self.operations.get(type(expression), None)
if operation is not None and \
all(isinstance(o, Constant) for o in operands):
value = operation(*(x.value for x in operands))
if isinstance(expression, BitVec):
return BitVecConstant(expression.size, value, taint=expression.taint)
else:
isinstance(expression, Bool)
return BoolConstant(value, taint=expression.taint)
else:
if any(operands[i] is not expression.operands[i] for i in range(len(operands))):
expression = self._rebuild(expression, operands)
return expression | [
"def",
"visit_Operation",
"(",
"self",
",",
"expression",
",",
"*",
"operands",
")",
":",
"operation",
"=",
"self",
".",
"operations",
".",
"get",
"(",
"type",
"(",
"expression",
")",
",",
"None",
")",
"if",
"operation",
"is",
"not",
"None",
"and",
"all",
"(",
"isinstance",
"(",
"o",
",",
"Constant",
")",
"for",
"o",
"in",
"operands",
")",
":",
"value",
"=",
"operation",
"(",
"*",
"(",
"x",
".",
"value",
"for",
"x",
"in",
"operands",
")",
")",
"if",
"isinstance",
"(",
"expression",
",",
"BitVec",
")",
":",
"return",
"BitVecConstant",
"(",
"expression",
".",
"size",
",",
"value",
",",
"taint",
"=",
"expression",
".",
"taint",
")",
"else",
":",
"isinstance",
"(",
"expression",
",",
"Bool",
")",
"return",
"BoolConstant",
"(",
"value",
",",
"taint",
"=",
"expression",
".",
"taint",
")",
"else",
":",
"if",
"any",
"(",
"operands",
"[",
"i",
"]",
"is",
"not",
"expression",
".",
"operands",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"operands",
")",
")",
")",
":",
"expression",
"=",
"self",
".",
"_rebuild",
"(",
"expression",
",",
"operands",
")",
"return",
"expression"
] | python | constant folding, if all operands of an expression are a Constant do the math | false |
2,080,604 | def __init__(self, cityCode='', localNumber='', extension='', areaCode='', countryCode=''):
"""Initialises a new 'TelephoneNumber' instance.
@param cityCode: (if applicable) City code.
@param localNumber: Main (local) part of this telephone number.
@param extension: (if applicable) Extension for this telephone number.
@param areaCode: Area or region code.
@param countryCode: Country code.
"""
#: (if applicable) City code.
self.cityCode = cityCode
#: Main (local) part of this telephone number.
self.localNumber = localNumber
#: (if applicable) Extension for this telephone number.
self.extension = extension
#: Area or region code.
self.areaCode = areaCode
#: Country code.
self.countryCode = countryCode | [
"def",
"__init__",
"(",
"self",
",",
"cityCode",
"=",
"''",
",",
"localNumber",
"=",
"''",
",",
"extension",
"=",
"''",
",",
"areaCode",
"=",
"''",
",",
"countryCode",
"=",
"''",
")",
":",
"self",
".",
"cityCode",
"=",
"cityCode",
"self",
".",
"localNumber",
"=",
"localNumber",
"self",
".",
"extension",
"=",
"extension",
"self",
".",
"areaCode",
"=",
"areaCode",
"self",
".",
"countryCode",
"=",
"countryCode"
] | python | Initialises a new 'TelephoneNumber' instance.
@param cityCode: (if applicable) City code.
@param localNumber: Main (local) part of this telephone number.
@param extension: (if applicable) Extension for this telephone number.
@param areaCode: Area or region code.
@param countryCode: Country code. | false |
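
Constructing an instance is just a matter of passing the string parts:

    number = TelephoneNumber(localNumber='5550123', areaCode='212', countryCode='1')
    print(number.countryCode, number.areaCode, number.localNumber)  # 1 212 5550123
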
2,018,062 | def remove_profile(name, s3=False):
"""
Removes a profile from your config
"""
user = os.path.expanduser("~")
if s3:
f = os.path.join(user, S3_PROFILE_ID + name)
else:
f = os.path.join(user, DBPY_PROFILE_ID + name)
try:
try:
open(f)
except:
raise Exception("Profile '{0}' does not exist. Could not find file {1}".format(name, f))
os.remove(f)
except Exception as e:
raise Exception("Could not remove profile {0}! Excpetion: {1}".format(name, e)) | [
"def",
"remove_profile",
"(",
"name",
",",
"s3",
"=",
"False",
")",
":",
"user",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~\"",
")",
"if",
"s3",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"user",
",",
"S3_PROFILE_ID",
"+",
"name",
")",
"else",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"user",
",",
"DBPY_PROFILE_ID",
"+",
"name",
")",
"try",
":",
"try",
":",
"open",
"(",
"f",
")",
"except",
":",
"raise",
"Exception",
"(",
"\"Profile '{0}' does not exist. Could not find file {1}\"",
".",
"format",
"(",
"name",
",",
"f",
")",
")",
"os",
".",
"remove",
"(",
"f",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"Exception",
"(",
"\"Could not remove profile {0}! Excpetion: {1}\"",
".",
"format",
"(",
"name",
",",
"e",
")",
")"
] | python | Removes a profile from your config | false |
2,495,395 | def observe_scanner(self, scanner):
"""
Hooks into multiple events of a scanner.
"""
scanner.observe(scanner.ALL_EVENTS,
self.absorb_args(self.modules.restore))
if self.clear:
scanner.observe(scanner.ALL_EVENTS,
self.absorb_args(self.clear_on_run))
scanner.observe(scanner.ALL_EVENTS, self.absorb_args(self._run))
if self.debug:
scanner.observe('created', echo("callback - created %(file)s"))
scanner.observe('modified', echo("callback - changed %(file)s"))
scanner.observe('deleted', echo("callback - deleted %(file)s"))
self._scanners.append(scanner) | [
"def",
"observe_scanner",
"(",
"self",
",",
"scanner",
")",
":",
"scanner",
".",
"observe",
"(",
"scanner",
".",
"ALL_EVENTS",
",",
"self",
".",
"absorb_args",
"(",
"self",
".",
"modules",
".",
"restore",
")",
")",
"if",
"self",
".",
"clear",
":",
"scanner",
".",
"observe",
"(",
"scanner",
".",
"ALL_EVENTS",
",",
"self",
".",
"absorb_args",
"(",
"self",
".",
"clear_on_run",
")",
")",
"scanner",
".",
"observe",
"(",
"scanner",
".",
"ALL_EVENTS",
",",
"self",
".",
"absorb_args",
"(",
"self",
".",
"_run",
")",
")",
"if",
"self",
".",
"debug",
":",
"scanner",
".",
"observe",
"(",
"'created'",
",",
"echo",
"(",
"\"callback - created %(file)s\"",
")",
")",
"scanner",
".",
"observe",
"(",
"'modified'",
",",
"echo",
"(",
"\"callback - changed %(file)s\"",
")",
")",
"scanner",
".",
"observe",
"(",
"'deleted'",
",",
"echo",
"(",
"\"callback - deleted %(file)s\"",
")",
")",
"self",
".",
"_scanners",
".",
"append",
"(",
"scanner",
")"
] | python | Hooks into multiple events of a scanner. | false |
2,656,568 | def my_func(p1, p2,
first_option='default_value',
second_option=5,
third_option=[4, 3],
last_option=False):
"""Help docstring. v{VERSION}
{message}
"""
if last_option:
raise RuntimeError("Test of DEBUG")
print('%s %s %s %s %s %s' % (p1, p2, first_option, second_option, third_option, last_option)) | [
"def",
"my_func",
"(",
"p1",
",",
"p2",
",",
"first_option",
"=",
"'default_value'",
",",
"second_option",
"=",
"5",
",",
"third_option",
"=",
"[",
"4",
",",
"3",
"]",
",",
"last_option",
"=",
"False",
")",
":",
"if",
"last_option",
":",
"raise",
"RuntimeError",
"(",
"\"Test of DEBUG\"",
")",
"print",
"(",
"'%s %s %s %s %s %s'",
"%",
"(",
"p1",
",",
"p2",
",",
"first_option",
",",
"second_option",
",",
"third_option",
",",
"last_option",
")",
")"
] | python | Help docstring. v{VERSION}
{message} | false |
2,096,906 | def download(ctx, help: bool, symbol: str, namespace: str, agent: str, currency: str):
""" Download the latest prices """
if help:
click.echo(ctx.get_help())
ctx.exit()
app = PriceDbApplication()
app.logger = logger
if currency:
currency = currency.strip()
currency = currency.upper()
# Otherwise download the prices for securities listed in the database.
app.download_prices(currency=currency, agent=agent, symbol=symbol, namespace=namespace) | [
"def",
"download",
"(",
"ctx",
",",
"help",
":",
"bool",
",",
"symbol",
":",
"str",
",",
"namespace",
":",
"str",
",",
"agent",
":",
"str",
",",
"currency",
":",
"str",
")",
":",
"if",
"help",
":",
"click",
".",
"echo",
"(",
"ctx",
".",
"get_help",
"(",
")",
")",
"ctx",
".",
"exit",
"(",
")",
"app",
"=",
"PriceDbApplication",
"(",
")",
"app",
".",
"logger",
"=",
"logger",
"if",
"currency",
":",
"currency",
"=",
"currency",
".",
"strip",
"(",
")",
"currency",
"=",
"currency",
".",
"upper",
"(",
")",
"app",
".",
"download_prices",
"(",
"currency",
"=",
"currency",
",",
"agent",
"=",
"agent",
",",
"symbol",
"=",
"symbol",
",",
"namespace",
"=",
"namespace",
")"
] | python | Download the latest prices | false |
2,146,113 | def send(reg_id, message, **kwargs):
"""
Site: https://developers.google.com
API: https://developers.google.com/web/updates/2016/03/web-push-encryption
Desc: Web Push notifications for Chrome and FireFox
Installation:
pip install 'pywebpush>=0.4.0'
"""
subscription_info = kwargs.pop('subscription_info')
payload = {
"title": kwargs.pop("event"),
"body": message,
"url": kwargs.pop("push_url", None)
}
payload.update(kwargs)
wp = WebPusher(subscription_info)
response = wp.send(
dumps(payload), gcm_key=settings.GCM_KEY,
ttl=kwargs.pop("ttl", 60))
if not response.ok or (
response.text and loads(response.text).get("failure") > 0):
raise GCMError(response.text)
return True | [
"def",
"send",
"(",
"reg_id",
",",
"message",
",",
"**",
"kwargs",
")",
":",
"subscription_info",
"=",
"kwargs",
".",
"pop",
"(",
"'subscription_info'",
")",
"payload",
"=",
"{",
"\"title\"",
":",
"kwargs",
".",
"pop",
"(",
"\"event\"",
")",
",",
"\"body\"",
":",
"message",
",",
"\"url\"",
":",
"kwargs",
".",
"pop",
"(",
"\"push_url\"",
",",
"None",
")",
"}",
"payload",
".",
"update",
"(",
"kwargs",
")",
"wp",
"=",
"WebPusher",
"(",
"subscription_info",
")",
"response",
"=",
"wp",
".",
"send",
"(",
"dumps",
"(",
"payload",
")",
",",
"gcm_key",
"=",
"settings",
".",
"GCM_KEY",
",",
"ttl",
"=",
"kwargs",
".",
"pop",
"(",
"\"ttl\"",
",",
"60",
")",
")",
"if",
"not",
"response",
".",
"ok",
"or",
"(",
"response",
".",
"text",
"and",
"loads",
"(",
"response",
".",
"text",
")",
".",
"get",
"(",
"\"failure\"",
")",
">",
"0",
")",
":",
"raise",
"GCMError",
"(",
"response",
".",
"text",
")",
"return",
"True"
] | python | Site: https://developers.google.com
API: https://developers.google.com/web/updates/2016/03/web-push-encryption
Desc: Web Push notifications for Chrome and FireFox
Installation:
pip install 'pywebpush>=0.4.0' | false |
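
A hedged sketch of a call; it assumes Django settings.GCM_KEY is configured and uses a placeholder subscription_info dict of the shape the browser's Push API delivers:

    subscription_info = {  # placeholder values from a real browser subscription
        'endpoint': 'https://fcm.googleapis.com/fcm/send/abc123',
        'keys': {'p256dh': '<client public key>', 'auth': '<auth secret>'},
    }
    send(None, 'Your build finished', event='CI',
         subscription_info=subscription_info,
         push_url='https://example.com/builds/42')
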
1,702,726 | def HuntIDToInt(hunt_id):
"""Convert hunt id string to an integer."""
# TODO(user): This code is only needed for a brief period of time when we
# allow running new rel-db flows with old aff4-based hunts. In this scenario
# parent_hunt_id is effectively not used, but it has to be an
# integer. Stripping "H:" from hunt ids then makes the rel-db happy. Remove
# this code when hunts are rel-db only.
if hunt_id.startswith("H:"):
hunt_id = hunt_id[2:]
try:
return int(hunt_id or "0", 16)
except ValueError as e:
raise HuntIDIsNotAnIntegerError(e) | [
"def",
"HuntIDToInt",
"(",
"hunt_id",
")",
":",
"if",
"hunt_id",
".",
"startswith",
"(",
"\"H:\"",
")",
":",
"hunt_id",
"=",
"hunt_id",
"[",
"2",
":",
"]",
"try",
":",
"return",
"int",
"(",
"hunt_id",
"or",
"\"0\"",
",",
"16",
")",
"except",
"ValueError",
"as",
"e",
":",
"raise",
"HuntIDIsNotAnIntegerError",
"(",
"e",
")"
] | python | Convert hunt id string to an integer. | false |
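
The conversion is a plain base-16 parse after stripping the optional "H:" prefix:

    print(HuntIDToInt('H:1A2B'))  # -> 6699, i.e. int('1A2B', 16)
    print(HuntIDToInt(''))        # -> 0 (empty ids fall back to '0')
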
1,631,992 | def create_configmap(
name,
namespace,
data,
source=None,
template=None,
saltenv='base',
**kwargs):
'''
Creates the kubernetes configmap as defined by the user.
CLI Examples::
salt 'minion1' kubernetes.create_configmap \
settings default '{"example.conf": "# example file"}'
salt 'minion2' kubernetes.create_configmap \
name=settings namespace=default data='{"example.conf": "# example file"}'
'''
if source:
data = __read_and_render_yaml_file(source, template, saltenv)
elif data is None:
data = {}
data = __enforce_only_strings_dict(data)
body = kubernetes.client.V1ConfigMap(
metadata=__dict_to_object_meta(name, namespace, {}),
data=data)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.create_namespaced_config_map(
namespace, body)
return api_response.to_dict()
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception(
'Exception when calling '
'CoreV1Api->create_namespaced_config_map'
)
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg) | [
"def",
"create_configmap",
"(",
"name",
",",
"namespace",
",",
"data",
",",
"source",
"=",
"None",
",",
"template",
"=",
"None",
",",
"saltenv",
"=",
"'base'",
",",
"**",
"kwargs",
")",
":",
"if",
"source",
":",
"data",
"=",
"__read_and_render_yaml_file",
"(",
"source",
",",
"template",
",",
"saltenv",
")",
"elif",
"data",
"is",
"None",
":",
"data",
"=",
"{",
"}",
"data",
"=",
"__enforce_only_strings_dict",
"(",
"data",
")",
"body",
"=",
"kubernetes",
".",
"client",
".",
"V1ConfigMap",
"(",
"metadata",
"=",
"__dict_to_object_meta",
"(",
"name",
",",
"namespace",
",",
"{",
"}",
")",
",",
"data",
"=",
"data",
")",
"cfg",
"=",
"_setup_conn",
"(",
"**",
"kwargs",
")",
"try",
":",
"api_instance",
"=",
"kubernetes",
".",
"client",
".",
"CoreV1Api",
"(",
")",
"api_response",
"=",
"api_instance",
".",
"create_namespaced_config_map",
"(",
"namespace",
",",
"body",
")",
"return",
"api_response",
".",
"to_dict",
"(",
")",
"except",
"(",
"ApiException",
",",
"HTTPError",
")",
"as",
"exc",
":",
"if",
"isinstance",
"(",
"exc",
",",
"ApiException",
")",
"and",
"exc",
".",
"status",
"==",
"404",
":",
"return",
"None",
"else",
":",
"log",
".",
"exception",
"(",
"'Exception when calling '",
"'CoreV1Api->create_namespaced_config_map'",
")",
"raise",
"CommandExecutionError",
"(",
"exc",
")",
"finally",
":",
"_cleanup",
"(",
"**",
"cfg",
")"
] | python | Creates the kubernetes configmap as defined by the user.
CLI Examples::
salt 'minion1' kubernetes.create_configmap \
settings default '{"example.conf": "# example file"}'
salt 'minion2' kubernetes.create_configmap \
name=settings namespace=default data='{"example.conf": "# example file"}' | false |
1,904,177 | def set_value(self, pymux, value):
"""
Take a string, and return an integer. Raise SetOptionError when the
given text does not parse to a positive integer.
"""
try:
value = int(value)
if value < 0:
raise ValueError
except ValueError:
raise SetOptionError('Expecting an integer.')
else:
setattr(pymux, self.attribute_name, value) | [
"def",
"set_value",
"(",
"self",
",",
"pymux",
",",
"value",
")",
":",
"try",
":",
"value",
"=",
"int",
"(",
"value",
")",
"if",
"value",
"<",
"0",
":",
"raise",
"ValueError",
"except",
"ValueError",
":",
"raise",
"SetOptionError",
"(",
"'Expecting an integer.'",
")",
"else",
":",
"setattr",
"(",
"pymux",
",",
"self",
".",
"attribute_name",
",",
"value",
")"
] | python | Take a string, and return an integer. Raise SetOptionError when the
given text does not parse to a positive integer. | false |
2,347,666 | def get_html_column_count(html_string):
"""
Gets the number of columns in an html table.
Paramters
---------
html_string : str
Returns
-------
int
The number of columns in the table
"""
try:
from bs4 import BeautifulSoup
except ImportError:
print("ERROR: You must have BeautifulSoup to use html2data")
return
soup = BeautifulSoup(html_string, 'html.parser')
table = soup.find('table')
if not table:
return 0
column_counts = []
trs = table.findAll('tr')
if len(trs) == 0:
return 0
for tr in range(len(trs)):
if tr == 0:
tds = trs[tr].findAll('th')
if len(tds) == 0:
tds = trs[tr].findAll('td')
else:
tds = trs[tr].findAll('td')
count = 0
for td in tds:
if td.has_attr('colspan'):
count += int(td['colspan'])
else:
count += 1
column_counts.append(count)
return max(column_counts) | [
"def",
"get_html_column_count",
"(",
"html_string",
")",
":",
"try",
":",
"from",
"bs4",
"import",
"BeautifulSoup",
"except",
"ImportError",
":",
"print",
"(",
"\"ERROR: You must have BeautifulSoup to use html2data\"",
")",
"return",
"soup",
"=",
"BeautifulSoup",
"(",
"html_string",
",",
"'html.parser'",
")",
"table",
"=",
"soup",
".",
"find",
"(",
"'table'",
")",
"if",
"not",
"table",
":",
"return",
"0",
"column_counts",
"=",
"[",
"]",
"trs",
"=",
"table",
".",
"findAll",
"(",
"'tr'",
")",
"if",
"len",
"(",
"trs",
")",
"==",
"0",
":",
"return",
"0",
"for",
"tr",
"in",
"range",
"(",
"len",
"(",
"trs",
")",
")",
":",
"if",
"tr",
"==",
"0",
":",
"tds",
"=",
"trs",
"[",
"tr",
"]",
".",
"findAll",
"(",
"'th'",
")",
"if",
"len",
"(",
"tds",
")",
"==",
"0",
":",
"tds",
"=",
"trs",
"[",
"tr",
"]",
".",
"findAll",
"(",
"'td'",
")",
"else",
":",
"tds",
"=",
"trs",
"[",
"tr",
"]",
".",
"findAll",
"(",
"'td'",
")",
"count",
"=",
"0",
"for",
"td",
"in",
"tds",
":",
"if",
"td",
".",
"has_attr",
"(",
"'colspan'",
")",
":",
"count",
"+=",
"int",
"(",
"td",
"[",
"'colspan'",
"]",
")",
"else",
":",
"count",
"+=",
"1",
"column_counts",
".",
"append",
"(",
"count",
")",
"return",
"max",
"(",
"column_counts",
")"
] | python | Gets the number of columns in an html table.
Paramters
---------
html_string : str
Returns
-------
int
The number of columns in the table | false |
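
A small check with a colspan cell (requires BeautifulSoup, i.e. the bs4 package):

    html = """
    <table>
      <tr><th>Name</th><th colspan="2">Score</th></tr>
      <tr><td>A</td><td>1</td><td>2</td></tr>
    </table>
    """
    print(get_html_column_count(html))  # -> 3 (1 + colspan of 2)
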
1,898,668 | def describe_listeners(load_balancer_arn=None, listener_arns=None, client=None):
"""
Permission: elasticloadbalancing:DescribeListeners
"""
kwargs = dict()
if load_balancer_arn:
kwargs.update(dict(LoadBalancerArn=load_balancer_arn))
if listener_arns:
kwargs.update(dict(ListenerArns=listener_arns))
return client.describe_listeners(**kwargs) | [
"def",
"describe_listeners",
"(",
"load_balancer_arn",
"=",
"None",
",",
"listener_arns",
"=",
"None",
",",
"client",
"=",
"None",
")",
":",
"kwargs",
"=",
"dict",
"(",
")",
"if",
"load_balancer_arn",
":",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"LoadBalancerArn",
"=",
"load_balancer_arn",
")",
")",
"if",
"listener_arns",
":",
"kwargs",
".",
"update",
"(",
"dict",
"(",
"ListenerArns",
"=",
"listener_arns",
")",
")",
"return",
"client",
".",
"describe_listeners",
"(",
"**",
"kwargs",
")"
] | python | Permission: elasticloadbalancing:DescribeListeners | false |
2,127,931 | def shutdown_check_handler():
"""This checks the AWS instance data URL to see if there's a pending
shutdown for the instance.
This is useful for AWS spot instances. If there is a pending shutdown posted
to the instance data URL, we'll use the result of this function break out of
the processing loop and shut everything down ASAP before the instance dies.
Returns
-------
bool
- True if the instance is going to die soon.
- False if the instance is still safe.
"""
url = 'http://169.254.169.254/latest/meta-data/spot/instance-action'
try:
resp = requests.get(url, timeout=1.0)
resp.raise_for_status()
stopinfo = resp.json()
if 'action' in stopinfo and stopinfo['action'] in ('stop',
'terminate',
'hibernate'):
stoptime = stopinfo['time']
LOGWARNING('instance is going to %s at %s' % (stopinfo['action'],
stoptime))
resp.close()
return True
else:
resp.close()
return False
except HTTPError as e:
resp.close()
return False
except Exception as e:
resp.close()
return False | [
"def",
"shutdown_check_handler",
"(",
")",
":",
"url",
"=",
"'http://169.254.169.254/latest/meta-data/spot/instance-action'",
"try",
":",
"resp",
"=",
"requests",
".",
"get",
"(",
"url",
",",
"timeout",
"=",
"1.0",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"stopinfo",
"=",
"resp",
".",
"json",
"(",
")",
"if",
"'action'",
"in",
"stopinfo",
"and",
"stopinfo",
"[",
"'action'",
"]",
"in",
"(",
"'stop'",
",",
"'terminate'",
",",
"'hibernate'",
")",
":",
"stoptime",
"=",
"stopinfo",
"[",
"'time'",
"]",
"LOGWARNING",
"(",
"'instance is going to %s at %s'",
"%",
"(",
"stopinfo",
"[",
"'action'",
"]",
",",
"stoptime",
")",
")",
"resp",
".",
"close",
"(",
")",
"return",
"True",
"else",
":",
"resp",
".",
"close",
"(",
")",
"return",
"False",
"except",
"HTTPError",
"as",
"e",
":",
"resp",
".",
"close",
"(",
")",
"return",
"False",
"except",
"Exception",
"as",
"e",
":",
"resp",
".",
"close",
"(",
")",
"return",
"False"
] | python | This checks the AWS instance data URL to see if there's a pending
shutdown for the instance.
This is useful for AWS spot instances. If there is a pending shutdown posted
to the instance data URL, we'll use the result of this function to break out of
the processing loop and shut everything down ASAP before the instance dies.
Returns
-------
bool
- True if the instance is going to die soon.
- False if the instance is still safe. | false |
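
Typical usage is polling inside a work loop on a spot instance:

    import time

    def do_work_chunk():  # hypothetical unit of work
        time.sleep(1)

    while not shutdown_check_handler():
        do_work_chunk()
        time.sleep(30)
    # the instance is about to die: checkpoint state and exit cleanly here
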
2,512,954 | def count(self):
"""Count the number of elements created, modified and deleted by the
changeset and analyses if it is a possible import, mass modification or
a mass deletion.
"""
xml = get_changeset(self.id)
actions = [action.tag for action in xml.getchildren()]
self.create = actions.count('create')
self.modify = actions.count('modify')
self.delete = actions.count('delete')
self.verify_editor()
try:
if (self.create / len(actions) > self.percentage and
self.create > self.create_threshold and
(self.powerfull_editor or self.create > self.top_threshold)):
self.label_suspicious('possible import')
elif (self.modify / len(actions) > self.percentage and
self.modify > self.modify_threshold):
self.label_suspicious('mass modification')
elif ((self.delete / len(actions) > self.percentage and
self.delete > self.delete_threshold) or
self.delete > self.top_threshold):
self.label_suspicious('mass deletion')
except ZeroDivisionError:
print('It seems this changeset was redacted') | [
"def",
"count",
"(",
"self",
")",
":",
"xml",
"=",
"get_changeset",
"(",
"self",
".",
"id",
")",
"actions",
"=",
"[",
"action",
".",
"tag",
"for",
"action",
"in",
"xml",
".",
"getchildren",
"(",
")",
"]",
"self",
".",
"create",
"=",
"actions",
".",
"count",
"(",
"'create'",
")",
"self",
".",
"modify",
"=",
"actions",
".",
"count",
"(",
"'modify'",
")",
"self",
".",
"delete",
"=",
"actions",
".",
"count",
"(",
"'delete'",
")",
"self",
".",
"verify_editor",
"(",
")",
"try",
":",
"if",
"(",
"self",
".",
"create",
"/",
"len",
"(",
"actions",
")",
">",
"self",
".",
"percentage",
"and",
"self",
".",
"create",
">",
"self",
".",
"create_threshold",
"and",
"(",
"self",
".",
"powerfull_editor",
"or",
"self",
".",
"create",
">",
"self",
".",
"top_threshold",
")",
")",
":",
"self",
".",
"label_suspicious",
"(",
"'possible import'",
")",
"elif",
"(",
"self",
".",
"modify",
"/",
"len",
"(",
"actions",
")",
">",
"self",
".",
"percentage",
"and",
"self",
".",
"modify",
">",
"self",
".",
"modify_threshold",
")",
":",
"self",
".",
"label_suspicious",
"(",
"'mass modification'",
")",
"elif",
"(",
"(",
"self",
".",
"delete",
"/",
"len",
"(",
"actions",
")",
">",
"self",
".",
"percentage",
"and",
"self",
".",
"delete",
">",
"self",
".",
"delete_threshold",
")",
"or",
"self",
".",
"delete",
">",
"self",
".",
"top_threshold",
")",
":",
"self",
".",
"label_suspicious",
"(",
"'mass deletion'",
")",
"except",
"ZeroDivisionError",
":",
"print",
"(",
"'It seems this changeset was redacted'",
")"
] | python | Count the number of elements created, modified and deleted by the
changeset and analyse whether it is a possible import, a mass modification or
a mass deletion. | false |
2,264,605 | def _reset_stylesheet(self):
""" Resets stylesheet"""
self.setFont(QtGui.QFont(self._font_family,
self._font_size + self._zoom_level))
flg_stylesheet = hasattr(self, '_flg_stylesheet')
if QtWidgets.QApplication.instance().styleSheet() or flg_stylesheet:
self._flg_stylesheet = True
# On Window, if the application once had a stylesheet, we must
# keep on using a stylesheet otherwise strange colors appear
# see https://github.com/OpenCobolIDE/OpenCobolIDE/issues/65
# Also happens on plasma 5
try:
plasma = os.environ['DESKTOP_SESSION'] == 'plasma'
except KeyError:
plasma = False
if sys.platform == 'win32' or plasma:
self.setStyleSheet('''QPlainTextEdit
{
background-color: %s;
color: %s;
}
''' % (self.background.name(), self.foreground.name()))
else:
# on linux/osx we just have to set an empty stylesheet to
# cancel any previous stylesheet and still keep a correct
# style for scrollbars
self.setStyleSheet('')
else:
p = self.palette()
p.setColor(QtGui.QPalette.Base, self.background)
p.setColor(QtGui.QPalette.Text, self.foreground)
p.setColor(QtGui.QPalette.Highlight,
self.selection_background)
p.setColor(QtGui.QPalette.HighlightedText,
self.selection_foreground)
self.setPalette(p)
self.repaint() | [
"def",
"_reset_stylesheet",
"(",
"self",
")",
":",
"self",
".",
"setFont",
"(",
"QtGui",
".",
"QFont",
"(",
"self",
".",
"_font_family",
",",
"self",
".",
"_font_size",
"+",
"self",
".",
"_zoom_level",
")",
")",
"flg_stylesheet",
"=",
"hasattr",
"(",
"self",
",",
"'_flg_stylesheet'",
")",
"if",
"QtWidgets",
".",
"QApplication",
".",
"instance",
"(",
")",
".",
"styleSheet",
"(",
")",
"or",
"flg_stylesheet",
":",
"self",
".",
"_flg_stylesheet",
"=",
"True",
"try",
":",
"plasma",
"=",
"os",
".",
"environ",
"[",
"'DESKTOP_SESSION'",
"]",
"==",
"'plasma'",
"except",
"KeyError",
":",
"plasma",
"=",
"False",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
"or",
"plasma",
":",
"self",
".",
"setStyleSheet",
"(",
"'''QPlainTextEdit\n {\n background-color: %s;\n color: %s;\n }\n '''",
"%",
"(",
"self",
".",
"background",
".",
"name",
"(",
")",
",",
"self",
".",
"foreground",
".",
"name",
"(",
")",
")",
")",
"else",
":",
"self",
".",
"setStyleSheet",
"(",
"''",
")",
"else",
":",
"p",
"=",
"self",
".",
"palette",
"(",
")",
"p",
".",
"setColor",
"(",
"QtGui",
".",
"QPalette",
".",
"Base",
",",
"self",
".",
"background",
")",
"p",
".",
"setColor",
"(",
"QtGui",
".",
"QPalette",
".",
"Text",
",",
"self",
".",
"foreground",
")",
"p",
".",
"setColor",
"(",
"QtGui",
".",
"QPalette",
".",
"Highlight",
",",
"self",
".",
"selection_background",
")",
"p",
".",
"setColor",
"(",
"QtGui",
".",
"QPalette",
".",
"HighlightedText",
",",
"self",
".",
"selection_foreground",
")",
"self",
".",
"setPalette",
"(",
"p",
")",
"self",
".",
"repaint",
"(",
")"
] | python | Resets stylesheet | false |
1,897,268 | def _function_signature(func):
"""Return the signature of a callable as a string.
Parameters
----------
func : callable
Function whose signature to extract.
Returns
-------
sig : string
Signature of the function.
"""
if sys.version_info.major > 2:
# Python 3 already implements this functionality
return func.__name__ + str(inspect.signature(func))
# In Python 2 we have to do it manually, unfortunately
spec = inspect.getargspec(func)
posargs = spec.args
defaults = spec.defaults if spec.defaults is not None else []
varargs = spec.varargs
kwargs = spec.keywords
deflen = 0 if defaults is None else len(defaults)
nodeflen = 0 if posargs is None else len(posargs) - deflen
args = ['{}'.format(arg) for arg in posargs[:nodeflen]]
args.extend('{}={}'.format(arg, dval)
for arg, dval in zip(posargs[nodeflen:], defaults))
if varargs:
args.append('*{}'.format(varargs))
if kwargs:
args.append('**{}'.format(kwargs))
argstr = ', '.join(args)
return '{}({})'.format(func.__name__, argstr) | [
"def",
"_function_signature",
"(",
"func",
")",
":",
"if",
"sys",
".",
"version_info",
".",
"major",
">",
"2",
":",
"return",
"func",
".",
"__name__",
"+",
"str",
"(",
"inspect",
".",
"signature",
"(",
"func",
")",
")",
"spec",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"posargs",
"=",
"spec",
".",
"args",
"defaults",
"=",
"spec",
".",
"defaults",
"if",
"spec",
".",
"defaults",
"is",
"not",
"None",
"else",
"[",
"]",
"varargs",
"=",
"spec",
".",
"varargs",
"kwargs",
"=",
"spec",
".",
"keywords",
"deflen",
"=",
"0",
"if",
"defaults",
"is",
"None",
"else",
"len",
"(",
"defaults",
")",
"nodeflen",
"=",
"0",
"if",
"posargs",
"is",
"None",
"else",
"len",
"(",
"posargs",
")",
"-",
"deflen",
"args",
"=",
"[",
"'{}'",
".",
"format",
"(",
"arg",
")",
"for",
"arg",
"in",
"posargs",
"[",
":",
"nodeflen",
"]",
"]",
"args",
".",
"extend",
"(",
"'{}={}'",
".",
"format",
"(",
"arg",
",",
"dval",
")",
"for",
"arg",
",",
"dval",
"in",
"zip",
"(",
"posargs",
"[",
"nodeflen",
":",
"]",
",",
"defaults",
")",
")",
"if",
"varargs",
":",
"args",
".",
"append",
"(",
"'*{}'",
".",
"format",
"(",
"varargs",
")",
")",
"if",
"kwargs",
":",
"args",
".",
"append",
"(",
"'**{}'",
".",
"format",
"(",
"kwargs",
")",
")",
"argstr",
"=",
"', '",
".",
"join",
"(",
"args",
")",
"return",
"'{}({})'",
".",
"format",
"(",
"func",
".",
"__name__",
",",
"argstr",
")"
] | python | Return the signature of a callable as a string.
Parameters
----------
func : callable
Function whose signature to extract.
Returns
-------
sig : string
Signature of the function. | false |
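
A quick demonstration on a function with a default, var-args, and keyword args:

    def demo(a, b=2, *args, **kwargs):
        pass

    print(_function_signature(demo))
    # Python 3 -> 'demo(a, b=2, *args, **kwargs)'
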
2,028,725 | def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars with the following parameters:
* ``id``: unique object identification number.
* ``xcentroid, ycentroid``: object centroid.
* ``sharpness``: object sharpness.
* ``roundness1``: object roundness based on symmetry.
* ``roundness2``: object roundness based on marginal Gaussian
fits.
* ``npix``: the total number of pixels in the Gaussian kernel
array.
* ``sky``: the input ``sky`` parameter.
* ``peak``: the peak, sky-subtracted, pixel value of the object.
* ``flux``: the object flux calculated as the peak density in
the convolved image divided by the detection threshold. This
derivation matches that of `DAOFIND`_ if ``sky`` is 0.0.
* ``mag``: the object instrumental magnitude calculated as
``-2.5 * log10(flux)``. The derivation matches that of
`DAOFIND`_ if ``sky`` is 0.0.
`None` is returned if no stars are found.
"""
star_cutouts = _find_stars(data, self.kernel, self.threshold_eff,
mask=mask,
exclude_border=self.exclude_border)
if star_cutouts is None:
warnings.warn('No sources were found.', NoDetectionsWarning)
return None
self._star_cutouts = star_cutouts
star_props = []
for star_cutout in star_cutouts:
props = _DAOFind_Properties(star_cutout, self.kernel, self.sky)
if np.isnan(props.dx_hx).any() or np.isnan(props.dy_hy).any():
continue
if (props.sharpness <= self.sharplo or
props.sharpness >= self.sharphi):
continue
if (props.roundness1 <= self.roundlo or
props.roundness1 >= self.roundhi):
continue
if (props.roundness2 <= self.roundlo or
props.roundness2 >= self.roundhi):
continue
if self.peakmax is not None and props.peak >= self.peakmax:
continue
star_props.append(props)
nstars = len(star_props)
if nstars == 0:
warnings.warn('Sources were found, but none pass the sharpness '
'and roundness criteria.', NoDetectionsWarning)
return None
if self.brightest is not None:
fluxes = [props.flux for props in star_props]
idx = sorted(np.argsort(fluxes)[-self.brightest:].tolist())
star_props = [star_props[k] for k in idx]
nstars = len(star_props)
table = Table()
table['id'] = np.arange(nstars) + 1
columns = ('xcentroid', 'ycentroid', 'sharpness', 'roundness1',
'roundness2', 'npix', 'sky', 'peak', 'flux', 'mag')
for column in columns:
table[column] = [getattr(props, column) for props in star_props]
return table | [
"def",
"find_stars",
"(",
"self",
",",
"data",
",",
"mask",
"=",
"None",
")",
":",
"star_cutouts",
"=",
"_find_stars",
"(",
"data",
",",
"self",
".",
"kernel",
",",
"self",
".",
"threshold_eff",
",",
"mask",
"=",
"mask",
",",
"exclude_border",
"=",
"self",
".",
"exclude_border",
")",
"if",
"star_cutouts",
"is",
"None",
":",
"warnings",
".",
"warn",
"(",
"'No sources were found.'",
",",
"NoDetectionsWarning",
")",
"return",
"None",
"self",
".",
"_star_cutouts",
"=",
"star_cutouts",
"star_props",
"=",
"[",
"]",
"for",
"star_cutout",
"in",
"star_cutouts",
":",
"props",
"=",
"_DAOFind_Properties",
"(",
"star_cutout",
",",
"self",
".",
"kernel",
",",
"self",
".",
"sky",
")",
"if",
"np",
".",
"isnan",
"(",
"props",
".",
"dx_hx",
")",
".",
"any",
"(",
")",
"or",
"np",
".",
"isnan",
"(",
"props",
".",
"dy_hy",
")",
".",
"any",
"(",
")",
":",
"continue",
"if",
"(",
"props",
".",
"sharpness",
"<=",
"self",
".",
"sharplo",
"or",
"props",
".",
"sharpness",
">=",
"self",
".",
"sharphi",
")",
":",
"continue",
"if",
"(",
"props",
".",
"roundness1",
"<=",
"self",
".",
"roundlo",
"or",
"props",
".",
"roundness1",
">=",
"self",
".",
"roundhi",
")",
":",
"continue",
"if",
"(",
"props",
".",
"roundness2",
"<=",
"self",
".",
"roundlo",
"or",
"props",
".",
"roundness2",
">=",
"self",
".",
"roundhi",
")",
":",
"continue",
"if",
"self",
".",
"peakmax",
"is",
"not",
"None",
"and",
"props",
".",
"peak",
">=",
"self",
".",
"peakmax",
":",
"continue",
"star_props",
".",
"append",
"(",
"props",
")",
"nstars",
"=",
"len",
"(",
"star_props",
")",
"if",
"nstars",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"'Sources were found, but none pass the sharpness '",
"'and roundness criteria.'",
",",
"NoDetectionsWarning",
")",
"return",
"None",
"if",
"self",
".",
"brightest",
"is",
"not",
"None",
":",
"fluxes",
"=",
"[",
"props",
".",
"flux",
"for",
"props",
"in",
"star_props",
"]",
"idx",
"=",
"sorted",
"(",
"np",
".",
"argsort",
"(",
"fluxes",
")",
"[",
"-",
"self",
".",
"brightest",
":",
"]",
".",
"tolist",
"(",
")",
")",
"star_props",
"=",
"[",
"star_props",
"[",
"k",
"]",
"for",
"k",
"in",
"idx",
"]",
"nstars",
"=",
"len",
"(",
"star_props",
")",
"table",
"=",
"Table",
"(",
")",
"table",
"[",
"'id'",
"]",
"=",
"np",
".",
"arange",
"(",
"nstars",
")",
"+",
"1",
"columns",
"=",
"(",
"'xcentroid'",
",",
"'ycentroid'",
",",
"'sharpness'",
",",
"'roundness1'",
",",
"'roundness2'",
",",
"'npix'",
",",
"'sky'",
",",
"'peak'",
",",
"'flux'",
",",
"'mag'",
")",
"for",
"column",
"in",
"columns",
":",
"table",
"[",
"column",
"]",
"=",
"[",
"getattr",
"(",
"props",
",",
"column",
")",
"for",
"props",
"in",
"star_props",
"]",
"return",
"table"
] | python | Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars with the following parameters:
* ``id``: unique object identification number.
* ``xcentroid, ycentroid``: object centroid.
* ``sharpness``: object sharpness.
* ``roundness1``: object roundness based on symmetry.
* ``roundness2``: object roundness based on marginal Gaussian
fits.
* ``npix``: the total number of pixels in the Gaussian kernel
array.
* ``sky``: the input ``sky`` parameter.
* ``peak``: the peak, sky-subtracted, pixel value of the object.
* ``flux``: the object flux calculated as the peak density in
the convolved image divided by the detection threshold. This
derivation matches that of `DAOFIND`_ if ``sky`` is 0.0.
* ``mag``: the object instrumental magnitude calculated as
``-2.5 * log10(flux)``. The derivation matches that of
`DAOFIND`_ if ``sky`` is 0.0.
`None` is returned if no stars are found. | false |
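
The method above is shaped like photutils' `DAOStarFinder.find_stars`; assuming that origin, a hedged usage sketch follows (the import paths match recent photutils releases, while older ones export `DAOStarFinder` from the package top level):

from astropy.stats import sigma_clipped_stats
from photutils.datasets import make_100gaussians_image
from photutils.detection import DAOStarFinder

data = make_100gaussians_image()                       # synthetic star field
mean, median, std = sigma_clipped_stats(data, sigma=3.0)
finder = DAOStarFinder(threshold=5.0 * std, fwhm=3.0)  # 5-sigma detection threshold
stars = finder.find_stars(data - median)               # astropy Table, or None
if stars is not None:                                  # None means no detections survived
    print(stars['id', 'xcentroid', 'ycentroid', 'flux'])

The sigma-clipped median is subtracted first because, per the docstring above, the `flux` and `mag` derivations only match DAOFIND when `sky` is 0.0.
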
2,510,116 | def __init__(self, tree):
"""
Initialise the data structure, compute m and M.
"""
info = self._precompute(tree._tree)
m, M = self._get_vectors(tree._tree, info)
self.little_m = m
self.big_m = M
self.tree = tree | [
"def",
"__init__",
"(",
"self",
",",
"tree",
")",
":",
"info",
"=",
"self",
".",
"_precompute",
"(",
"tree",
".",
"_tree",
")",
"m",
",",
"M",
"=",
"self",
".",
"_get_vectors",
"(",
"tree",
".",
"_tree",
",",
"info",
")",
"self",
".",
"little_m",
"=",
"m",
"self",
".",
"big_m",
"=",
"M",
"self",
".",
"tree",
"=",
"tree"
] | python | Initialise the data structure, compute m and M. | false |
2,030,263 | def update(cls, id, name, size, quantity, password, sshkey, upgrade,
console, snapshot_profile, reset_mysql_password, background):
"""Update a PaaS instance."""
if not background and not cls.intty():
background = True
paas_params = {}
if name:
paas_params['name'] = name
if size:
paas_params['size'] = size
if quantity:
paas_params['quantity'] = quantity
if password:
paas_params['password'] = password
paas_params.update(cls.convert_sshkey(sshkey))
if upgrade:
paas_params['upgrade'] = upgrade
if console:
paas_params['console'] = console
# XXX to delete a snapshot_profile the value has to be an empty string
if snapshot_profile is not None:
paas_params['snapshot_profile'] = snapshot_profile
if reset_mysql_password:
paas_params['reset_mysql_password'] = reset_mysql_password
result = cls.call('paas.update', cls.usable_id(id), paas_params)
if background:
return result
# interactive mode, run a progress bar
cls.echo('Updating your PaaS instance.')
cls.display_progress(result) | [
"def",
"update",
"(",
"cls",
",",
"id",
",",
"name",
",",
"size",
",",
"quantity",
",",
"password",
",",
"sshkey",
",",
"upgrade",
",",
"console",
",",
"snapshot_profile",
",",
"reset_mysql_password",
",",
"background",
")",
":",
"if",
"not",
"background",
"and",
"not",
"cls",
".",
"intty",
"(",
")",
":",
"background",
"=",
"True",
"paas_params",
"=",
"{",
"}",
"if",
"name",
":",
"paas_params",
"[",
"'name'",
"]",
"=",
"name",
"if",
"size",
":",
"paas_params",
"[",
"'size'",
"]",
"=",
"size",
"if",
"quantity",
":",
"paas_params",
"[",
"'quantity'",
"]",
"=",
"quantity",
"if",
"password",
":",
"paas_params",
"[",
"'password'",
"]",
"=",
"password",
"paas_params",
".",
"update",
"(",
"cls",
".",
"convert_sshkey",
"(",
"sshkey",
")",
")",
"if",
"upgrade",
":",
"paas_params",
"[",
"'upgrade'",
"]",
"=",
"upgrade",
"if",
"console",
":",
"paas_params",
"[",
"'console'",
"]",
"=",
"console",
"if",
"snapshot_profile",
"is",
"not",
"None",
":",
"paas_params",
"[",
"'snapshot_profile'",
"]",
"=",
"snapshot_profile",
"if",
"reset_mysql_password",
":",
"paas_params",
"[",
"'reset_mysql_password'",
"]",
"=",
"reset_mysql_password",
"result",
"=",
"cls",
".",
"call",
"(",
"'paas.update'",
",",
"cls",
".",
"usable_id",
"(",
"id",
")",
",",
"paas_params",
")",
"if",
"background",
":",
"return",
"result",
"cls",
".",
"echo",
"(",
"'Updating your PaaS instance.'",
")",
"cls",
".",
"display_progress",
"(",
"result",
")"
] | python | Update a PaaS instance. | false |
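
Finally, a sketch of calling the `update` classmethod above directly. The snippet reads like gandi.cli's `Paas` module, so the import path, and the assumption that `convert_sshkey(None)` is a no-op, are both unverified guesses; on the command line the equivalent operation would be something like `gandi paas update my-paas --name renamed`:

from gandi.cli.modules.paas import Paas  # assumed import path

# background=True returns the raw API operation instead of rendering
# an interactive progress bar; every option the caller does not want
# to change is passed explicitly as None because the method takes
# them all as required positional parameters.
op = Paas.update('my-paas-instance',
                 name='renamed-instance', size=None, quantity=None,
                 password=None, sshkey=None, upgrade=None, console=None,
                 snapshot_profile=None, reset_mysql_password=None,
                 background=True)
print(op)
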