nwo (stringlengths 5-106) | sha (stringlengths 40-40) | path (stringlengths 4-174) | language (stringclasses 1 value) | identifier (stringlengths 1-140) | parameters (stringlengths 0-87.7k) | argument_list (stringclasses 1 value) | return_statement (stringlengths 0-426k) | docstring (stringlengths 0-64.3k) | docstring_summary (stringlengths 0-26.3k) | docstring_tokens (sequence) | function (stringlengths 18-4.83M) | function_tokens (sequence) | url (stringlengths 83-304) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
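Each row below is one record in this schema: a repo (`nwo`) at a pinned commit (`sha`), a file `path`, the function's `identifier`, docstring fields, the full `function` source, token sequences, and a GitHub `url` anchored to the exact line range. As a minimal, hedged sketch of how records like these can be consumed — assuming a JSON Lines export that uses the field names above; the file name `functions.jsonl` is a hypothetical placeholder, not a path defined by this card:

```python
import json

# Minimal sketch: iterate a JSON Lines export whose records use the schema
# above. The file name "functions.jsonl" is a hypothetical placeholder.
with open("functions.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # "nwo" names the repo, "sha" pins the commit, and "url" links to
        # the exact line range of the function on GitHub.
        print(record["nwo"], record["identifier"], record["url"])
        # "function_tokens" is a flat token list; joining it yields a rough,
        # whitespace-lossy view of the source kept verbatim in "function".
        print(" ".join(record["function_tokens"])[:80])
```

Because every record carries both `sha` and a line-anchored `url`, each example can be re-fetched from the pinned commit even if the upstream file later changes.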
gnuradio/pybombs | 17044241bf835b93571026b112f179f2db7448a4 | pybombs/commands/recipes.py | python | Recipes.run_update | (self) | pybombs recipes update [alias] | pybombs recipes update [alias] | [
"pybombs",
"recipes",
"update",
"[",
"alias",
"]"
] | def run_update(self):
"""
pybombs recipes update [alias]
"""
# TODO allow directories
aliases_to_update = self.args.alias or self.cfg.get_named_recipe_sources().keys()
if not all([self.update_recipe_repo(x) for x in aliases_to_update]):
return -1 | [
"def",
"run_update",
"(",
"self",
")",
":",
"# TODO allow directories",
"aliases_to_update",
"=",
"self",
".",
"args",
".",
"alias",
"or",
"self",
".",
"cfg",
".",
"get_named_recipe_sources",
"(",
")",
".",
"keys",
"(",
")",
"if",
"not",
"all",
"(",
"[",
"self",
".",
"update_recipe_repo",
"(",
"x",
")",
"for",
"x",
"in",
"aliases_to_update",
"]",
")",
":",
"return",
"-",
"1"
] | https://github.com/gnuradio/pybombs/blob/17044241bf835b93571026b112f179f2db7448a4/pybombs/commands/recipes.py#L190-L197 |
||
rhinstaller/anaconda | 63edc8680f1b05cbfe11bef28703acba808c5174 | pyanaconda/core/kickstart/addon.py | python | AddonData.handle_line | (self, line, line_number=None) | Handle one line of the section.
:param line: a line to parse
:param line_number: a line number
:raise: KickstartParseError for invalid lines | Handle one line of the section. | [
"Handle",
"one",
"line",
"of",
"the",
"section",
"."
] | def handle_line(self, line, line_number=None):
"""Handle one line of the section.
:param line: a line to parse
:param line_number: a line number
:raise: KickstartParseError for invalid lines
"""
pass | [
"def",
"handle_line",
"(",
"self",
",",
"line",
",",
"line_number",
"=",
"None",
")",
":",
"pass"
] | https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/core/kickstart/addon.py#L70-L77 |
||
picklepete/pyicloud | bab549a593b1f2554de8d0eefa0b053c18e09f6f | pyicloud/services/photos.py | python | PhotoAsset.filename | (self) | return base64.b64decode(
self._master_record["fields"]["filenameEnc"]["value"]
).decode("utf-8") | Gets the photo file name. | Gets the photo file name. | [
"Gets",
"the",
"photo",
"file",
"name",
"."
] | def filename(self):
"""Gets the photo file name."""
return base64.b64decode(
self._master_record["fields"]["filenameEnc"]["value"]
).decode("utf-8") | [
"def",
"filename",
"(",
"self",
")",
":",
"return",
"base64",
".",
"b64decode",
"(",
"self",
".",
"_master_record",
"[",
"\"fields\"",
"]",
"[",
"\"filenameEnc\"",
"]",
"[",
"\"value\"",
"]",
")",
".",
"decode",
"(",
"\"utf-8\"",
")"
] | https://github.com/picklepete/pyicloud/blob/bab549a593b1f2554de8d0eefa0b053c18e09f6f/pyicloud/services/photos.py#L519-L523 |
|
rhinstaller/anaconda | 63edc8680f1b05cbfe11bef28703acba808c5174 | pyanaconda/ui/gui/spokes/__init__.py | python | NormalSpoke.clear_info | (self) | Clear the last set warning message and call the ancestors method. | Clear the last set warning message and call the ancestors method. | [
"Clear",
"the",
"last",
"set",
"warning",
"message",
"and",
"call",
"the",
"ancestors",
"method",
"."
] | def clear_info(self):
"""Clear the last set warning message and call the ancestors method."""
self._current_warning_message = ""
super().clear_info() | [
"def",
"clear_info",
"(",
"self",
")",
":",
"self",
".",
"_current_warning_message",
"=",
"\"\"",
"super",
"(",
")",
".",
"clear_info",
"(",
")"
] | https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/ui/gui/spokes/__init__.py#L75-L78 |
||
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/iotvideo/v20191126/models.py | python | CreateAnonymousAccessTokenRequest.__init__ | (self) | r"""
:param TtlMinutes: Token的TTL(time to alive)分钟数,最大值1440(即24小时)
:type TtlMinutes: int
:param Tid: 设备ID。创建Token时, 此参数为必须项
:type Tid: str
:param OldAccessToken: 旧的AccessToken。续期Token时,此参数为必须
:type OldAccessToken: str | r"""
:param TtlMinutes: Token的TTL(time to alive)分钟数,最大值1440(即24小时)
:type TtlMinutes: int
:param Tid: 设备ID。创建Token时, 此参数为必须项
:type Tid: str
:param OldAccessToken: 旧的AccessToken。续期Token时,此参数为必须
:type OldAccessToken: str | [
"r",
":",
"param",
"TtlMinutes",
":",
"Token的TTL",
"(",
"time",
"to",
"alive",
")",
"分钟数",
"最大值1440",
"(",
"即24小时",
")",
":",
"type",
"TtlMinutes",
":",
"int",
":",
"param",
"Tid",
":",
"设备ID。创建Token时",
"此参数为必须项",
":",
"type",
"Tid",
":",
"str",
":",
"param",
"OldAccessToken",
":",
"旧的AccessToken。续期Token时,此参数为必须",
":",
"type",
"OldAccessToken",
":",
"str"
] | def __init__(self):
r"""
:param TtlMinutes: Token的TTL(time to alive)分钟数,最大值1440(即24小时)
:type TtlMinutes: int
:param Tid: 设备ID。创建Token时, 此参数为必须项
:type Tid: str
:param OldAccessToken: 旧的AccessToken。续期Token时,此参数为必须
:type OldAccessToken: str
"""
self.TtlMinutes = None
self.Tid = None
self.OldAccessToken = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"TtlMinutes",
"=",
"None",
"self",
".",
"Tid",
"=",
"None",
"self",
".",
"OldAccessToken",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/iotvideo/v20191126/models.py#L172-L183 |
||
DataDog/integrations-core | 934674b29d94b70ccc008f76ea172d0cdae05e1e | vertica/datadog_checks/vertica/config_models/defaults.py | python | instance_tls_validate_hostname | (field, value) | return True | [] | def instance_tls_validate_hostname(field, value):
return True | [
"def",
"instance_tls_validate_hostname",
"(",
"field",
",",
"value",
")",
":",
"return",
"True"
] | https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/vertica/datadog_checks/vertica/config_models/defaults.py#L101-L102 |
|||
numba/numba | bf480b9e0da858a65508c2b17759a72ee6a44c51 | numba/cpython/heapq.py | python | _siftup_max | (heap, pos) | [] | def _siftup_max(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
childpos = 2 * pos + 1
while childpos < endpos:
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
heap[pos] = heap[childpos]
pos = childpos
childpos = 2 * pos + 1
heap[pos] = newitem
_siftdown_max(heap, startpos, pos) | [
"def",
"_siftup_max",
"(",
"heap",
",",
"pos",
")",
":",
"endpos",
"=",
"len",
"(",
"heap",
")",
"startpos",
"=",
"pos",
"newitem",
"=",
"heap",
"[",
"pos",
"]",
"childpos",
"=",
"2",
"*",
"pos",
"+",
"1",
"while",
"childpos",
"<",
"endpos",
":",
"rightpos",
"=",
"childpos",
"+",
"1",
"if",
"rightpos",
"<",
"endpos",
"and",
"not",
"heap",
"[",
"rightpos",
"]",
"<",
"heap",
"[",
"childpos",
"]",
":",
"childpos",
"=",
"rightpos",
"heap",
"[",
"pos",
"]",
"=",
"heap",
"[",
"childpos",
"]",
"pos",
"=",
"childpos",
"childpos",
"=",
"2",
"*",
"pos",
"+",
"1",
"heap",
"[",
"pos",
"]",
"=",
"newitem",
"_siftdown_max",
"(",
"heap",
",",
"startpos",
",",
"pos",
")"
] | https://github.com/numba/numba/blob/bf480b9e0da858a65508c2b17759a72ee6a44c51/numba/cpython/heapq.py#L64-L81 |
||||
openhatch/oh-mainline | ce29352a034e1223141dcc2f317030bbc3359a51 | vendor/packages/Django/django/db/backends/postgresql_psycopg2/version.py | python | get_version | (connection) | Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query. | Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.) | [
"Returns",
"an",
"integer",
"representing",
"the",
"major",
"minor",
"and",
"revision",
"number",
"of",
"the",
"server",
".",
"Format",
"is",
"the",
"one",
"used",
"for",
"the",
"return",
"value",
"of",
"libpq",
"PQServerVersion",
"()",
"/",
"server_version",
"connection",
"attribute",
"(",
"available",
"in",
"newer",
"psycopg2",
"versions",
".",
")"
] | def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
cursor = connection.cursor()
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0]) | [
"def",
"get_version",
"(",
"connection",
")",
":",
"if",
"hasattr",
"(",
"connection",
",",
"'server_version'",
")",
":",
"return",
"connection",
".",
"server_version",
"else",
":",
"cursor",
"=",
"connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"SELECT version()\"",
")",
"return",
"_parse_version",
"(",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
")"
] | https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/Django/django/db/backends/postgresql_psycopg2/version.py#L24-L43 |
||
django-parler/django-parler | 577ca2f4a80713a9272c48db30e914a4d9332358 | parler/views.py | python | LanguageChoiceMixin.get_default_language | (self, object=None) | return None | Return the default language to use, if no language parameter is given.
By default, it uses the default parler-language. | Return the default language to use, if no language parameter is given.
By default, it uses the default parler-language. | [
"Return",
"the",
"default",
"language",
"to",
"use",
"if",
"no",
"language",
"parameter",
"is",
"given",
".",
"By",
"default",
"it",
"uses",
"the",
"default",
"parler",
"-",
"language",
"."
] | def get_default_language(self, object=None):
"""
Return the default language to use, if no language parameter is given.
By default, it uses the default parler-language.
"""
# Some users may want to override this, to return get_language()
return None | [
"def",
"get_default_language",
"(",
"self",
",",
"object",
"=",
"None",
")",
":",
"# Some users may want to override this, to return get_language()",
"return",
"None"
] | https://github.com/django-parler/django-parler/blob/577ca2f4a80713a9272c48db30e914a4d9332358/parler/views.py#L225-L231 |
|
kanzure/nanoengineer | 874e4c9f8a9190f093625b267f9767e19f82e6c4 | cad/src/dna/model/ChainAtomMarker.py | python | ChainAtomMarker.writemmp | (self, mapping) | return Jig.writemmp(self, mapping) | [extends superclass method] | [extends superclass method] | [
"[",
"extends",
"superclass",
"method",
"]"
] | def writemmp(self, mapping):
"""
[extends superclass method]
"""
# check a few things, then call superclass method
try:
assert not self.is_homeless() # redundant as of 080111, that's ok
assert len(self.atoms) in (1, _NUMBER_OF_MARKER_ATOMS)
self._check_atom_order()
except:
#bruce 080317, for debugging the save file traceback in
# "assert not self.is_homeless()" (above) in bug 2673,
# happens when saving after region select + delete of any
# duplex; fixed now
msg = "\n*** BUG: exception in checks before DnaMarker.writemmp; " \
"continuing, but beware of errors when reopening the file"
print_compact_traceback( msg + ": ")
pass
return Jig.writemmp(self, mapping) | [
"def",
"writemmp",
"(",
"self",
",",
"mapping",
")",
":",
"# check a few things, then call superclass method",
"try",
":",
"assert",
"not",
"self",
".",
"is_homeless",
"(",
")",
"# redundant as of 080111, that's ok",
"assert",
"len",
"(",
"self",
".",
"atoms",
")",
"in",
"(",
"1",
",",
"_NUMBER_OF_MARKER_ATOMS",
")",
"self",
".",
"_check_atom_order",
"(",
")",
"except",
":",
"#bruce 080317, for debugging the save file traceback in",
"# \"assert not self.is_homeless()\" (above) in bug 2673,",
"# happens when saving after region select + delete of any",
"# duplex; fixed now",
"msg",
"=",
"\"\\n*** BUG: exception in checks before DnaMarker.writemmp; \"",
"\"continuing, but beware of errors when reopening the file\"",
"print_compact_traceback",
"(",
"msg",
"+",
"\": \"",
")",
"pass",
"return",
"Jig",
".",
"writemmp",
"(",
"self",
",",
"mapping",
")"
] | https://github.com/kanzure/nanoengineer/blob/874e4c9f8a9190f093625b267f9767e19f82e6c4/cad/src/dna/model/ChainAtomMarker.py#L164-L183 |
|
cloud-custodian/cloud-custodian | 1ce1deb2d0f0832d6eb8839ef74b4c9e397128de | tools/dev/dockerpkg.py | python | cli | () | Custodian Docker Packaging Tool
slices, dices, and blends :-) | Custodian Docker Packaging Tool | [
"Custodian",
"Docker",
"Packaging",
"Tool"
] | def cli():
"""Custodian Docker Packaging Tool
slices, dices, and blends :-)
"""
logging.basicConfig(
level=logging.INFO, format="%(asctime)s:%(levelname)s %(message)s"
)
logging.getLogger("docker").setLevel(logging.INFO)
logging.getLogger("urllib3").setLevel(logging.INFO)
for name, image in list(ImageMap.items()):
ImageMap[name + "-distroless"] = image.clone(
dict(
tag_prefix="distroless-",
base_build_image="debian:10-slim",
base_target_image="gcr.io/distroless/python3-debian10",
),
target=[TARGET_DISTROLESS_STAGE],
) | [
"def",
"cli",
"(",
")",
":",
"logging",
".",
"basicConfig",
"(",
"level",
"=",
"logging",
".",
"INFO",
",",
"format",
"=",
"\"%(asctime)s:%(levelname)s %(message)s\"",
")",
"logging",
".",
"getLogger",
"(",
"\"docker\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"logging",
".",
"getLogger",
"(",
"\"urllib3\"",
")",
".",
"setLevel",
"(",
"logging",
".",
"INFO",
")",
"for",
"name",
",",
"image",
"in",
"list",
"(",
"ImageMap",
".",
"items",
"(",
")",
")",
":",
"ImageMap",
"[",
"name",
"+",
"\"-distroless\"",
"]",
"=",
"image",
".",
"clone",
"(",
"dict",
"(",
"tag_prefix",
"=",
"\"distroless-\"",
",",
"base_build_image",
"=",
"\"debian:10-slim\"",
",",
"base_target_image",
"=",
"\"gcr.io/distroless/python3-debian10\"",
",",
")",
",",
"target",
"=",
"[",
"TARGET_DISTROLESS_STAGE",
"]",
",",
")"
] | https://github.com/cloud-custodian/cloud-custodian/blob/1ce1deb2d0f0832d6eb8839ef74b4c9e397128de/tools/dev/dockerpkg.py#L240-L259 |
||
IntelLabs/nlp-architect | 60afd0dd1bfd74f01b4ac8f613cb484777b80284 | examples/sparse_gnmt/train.py | python | add_info_summaries | (summary_writer, global_step, info) | Add stuffs in info to summaries. | Add stuffs in info to summaries. | [
"Add",
"stuffs",
"in",
"info",
"to",
"summaries",
"."
] | def add_info_summaries(summary_writer, global_step, info):
"""Add stuffs in info to summaries."""
excluded_list = ["learning_rate"]
for key in info:
if key not in excluded_list:
utils.add_summary(summary_writer, global_step, key, info[key]) | [
"def",
"add_info_summaries",
"(",
"summary_writer",
",",
"global_step",
",",
"info",
")",
":",
"excluded_list",
"=",
"[",
"\"learning_rate\"",
"]",
"for",
"key",
"in",
"info",
":",
"if",
"key",
"not",
"in",
"excluded_list",
":",
"utils",
".",
"add_summary",
"(",
"summary_writer",
",",
"global_step",
",",
"key",
",",
"info",
"[",
"key",
"]",
")"
] | https://github.com/IntelLabs/nlp-architect/blob/60afd0dd1bfd74f01b4ac8f613cb484777b80284/examples/sparse_gnmt/train.py#L462-L467 |
||
msracver/Deep-Exemplar-based-Colorization | 964026106bb51171a3c58be58d5c117e2f62bb4c | similarity_subnet/scripts/cpp_lint.py | python | RemoveMultiLineCommentsFromRange | (lines, begin, end) | Clears a range of lines for multi-line comments. | Clears a range of lines for multi-line comments. | [
"Clears",
"a",
"range",
"of",
"lines",
"for",
"multi",
"-",
"line",
"comments",
"."
] | def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy' | [
"def",
"RemoveMultiLineCommentsFromRange",
"(",
"lines",
",",
"begin",
",",
"end",
")",
":",
"# Having // dummy comments makes the lines non-empty, so we will not get",
"# unnecessary blank line warnings later in the code.",
"for",
"i",
"in",
"range",
"(",
"begin",
",",
"end",
")",
":",
"lines",
"[",
"i",
"]",
"=",
"'// dummy'"
] | https://github.com/msracver/Deep-Exemplar-based-Colorization/blob/964026106bb51171a3c58be58d5c117e2f62bb4c/similarity_subnet/scripts/cpp_lint.py#L1143-L1148 |
||
dropbox/dropbox-sdk-python | 015437429be224732990041164a21a0501235db1 | dropbox/team_log.py | python | EventType.team_merge_request_sent_shown_to_secondary_team | (cls, val) | return cls('team_merge_request_sent_shown_to_secondary_team', val) | Create an instance of this class set to the
``team_merge_request_sent_shown_to_secondary_team`` tag with value
``val``.
:param TeamMergeRequestSentShownToSecondaryTeamType val:
:rtype: EventType | Create an instance of this class set to the
``team_merge_request_sent_shown_to_secondary_team`` tag with value
``val``. | [
"Create",
"an",
"instance",
"of",
"this",
"class",
"set",
"to",
"the",
"team_merge_request_sent_shown_to_secondary_team",
"tag",
"with",
"value",
"val",
"."
] | def team_merge_request_sent_shown_to_secondary_team(cls, val):
"""
Create an instance of this class set to the
``team_merge_request_sent_shown_to_secondary_team`` tag with value
``val``.
:param TeamMergeRequestSentShownToSecondaryTeamType val:
:rtype: EventType
"""
return cls('team_merge_request_sent_shown_to_secondary_team', val) | [
"def",
"team_merge_request_sent_shown_to_secondary_team",
"(",
"cls",
",",
"val",
")",
":",
"return",
"cls",
"(",
"'team_merge_request_sent_shown_to_secondary_team'",
",",
"val",
")"
] | https://github.com/dropbox/dropbox-sdk-python/blob/015437429be224732990041164a21a0501235db1/dropbox/team_log.py#L28390-L28399 |
|
brendano/tweetmotif | 1b0b1e3a941745cd5a26eba01f554688b7c4b27e | everything_else/djfrontend/django-1.0.2/utils/_decimal.py | python | Decimal.__mul__ | (self, other, context=None) | return ans | Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation. | Return self * other. | [
"Return",
"self",
"*",
"other",
"."
] | def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return Infsign[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return Infsign[resultsign]
resultexp = self._exp + other._exp
shouldround = context._rounding_decision == ALWAYS_ROUND
# Special case for multiplying by zero
if not self or not other:
ans = Decimal((resultsign, (0,), resultexp))
if shouldround:
#Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == (1,):
ans = Decimal((resultsign, other._int, resultexp))
if shouldround:
ans = ans._fix(context)
return ans
if other._int == (1,):
ans = Decimal((resultsign, self._int, resultexp))
if shouldround:
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = Decimal( (resultsign, map(int, str(op1.int * op2.int)), resultexp))
if shouldround:
ans = ans._fix(context)
return ans | [
"def",
"__mul__",
"(",
"self",
",",
"other",
",",
"context",
"=",
"None",
")",
":",
"other",
"=",
"_convert_other",
"(",
"other",
")",
"if",
"other",
"is",
"NotImplemented",
":",
"return",
"other",
"if",
"context",
"is",
"None",
":",
"context",
"=",
"getcontext",
"(",
")",
"resultsign",
"=",
"self",
".",
"_sign",
"^",
"other",
".",
"_sign",
"if",
"self",
".",
"_is_special",
"or",
"other",
".",
"_is_special",
":",
"ans",
"=",
"self",
".",
"_check_nans",
"(",
"other",
",",
"context",
")",
"if",
"ans",
":",
"return",
"ans",
"if",
"self",
".",
"_isinfinity",
"(",
")",
":",
"if",
"not",
"other",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'(+-)INF * 0'",
")",
"return",
"Infsign",
"[",
"resultsign",
"]",
"if",
"other",
".",
"_isinfinity",
"(",
")",
":",
"if",
"not",
"self",
":",
"return",
"context",
".",
"_raise_error",
"(",
"InvalidOperation",
",",
"'0 * (+-)INF'",
")",
"return",
"Infsign",
"[",
"resultsign",
"]",
"resultexp",
"=",
"self",
".",
"_exp",
"+",
"other",
".",
"_exp",
"shouldround",
"=",
"context",
".",
"_rounding_decision",
"==",
"ALWAYS_ROUND",
"# Special case for multiplying by zero",
"if",
"not",
"self",
"or",
"not",
"other",
":",
"ans",
"=",
"Decimal",
"(",
"(",
"resultsign",
",",
"(",
"0",
",",
")",
",",
"resultexp",
")",
")",
"if",
"shouldround",
":",
"#Fixing in case the exponent is out of bounds",
"ans",
"=",
"ans",
".",
"_fix",
"(",
"context",
")",
"return",
"ans",
"# Special case for multiplying by power of 10",
"if",
"self",
".",
"_int",
"==",
"(",
"1",
",",
")",
":",
"ans",
"=",
"Decimal",
"(",
"(",
"resultsign",
",",
"other",
".",
"_int",
",",
"resultexp",
")",
")",
"if",
"shouldround",
":",
"ans",
"=",
"ans",
".",
"_fix",
"(",
"context",
")",
"return",
"ans",
"if",
"other",
".",
"_int",
"==",
"(",
"1",
",",
")",
":",
"ans",
"=",
"Decimal",
"(",
"(",
"resultsign",
",",
"self",
".",
"_int",
",",
"resultexp",
")",
")",
"if",
"shouldround",
":",
"ans",
"=",
"ans",
".",
"_fix",
"(",
"context",
")",
"return",
"ans",
"op1",
"=",
"_WorkRep",
"(",
"self",
")",
"op2",
"=",
"_WorkRep",
"(",
"other",
")",
"ans",
"=",
"Decimal",
"(",
"(",
"resultsign",
",",
"map",
"(",
"int",
",",
"str",
"(",
"op1",
".",
"int",
"*",
"op2",
".",
"int",
")",
")",
",",
"resultexp",
")",
")",
"if",
"shouldround",
":",
"ans",
"=",
"ans",
".",
"_fix",
"(",
"context",
")",
"return",
"ans"
] | https://github.com/brendano/tweetmotif/blob/1b0b1e3a941745cd5a26eba01f554688b7c4b27e/everything_else/djfrontend/django-1.0.2/utils/_decimal.py#L1076-L1135 |
|
TencentCloud/tencentcloud-sdk-python | 3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2 | tencentcloud/dts/v20180330/models.py | python | ErrorInfo.__init__ | (self) | r"""
:param ErrorLog: 具体的报错日志, 包含错误码和错误信息
:type ErrorLog: str
:param HelpDoc: 报错对应的帮助文档Ur
:type HelpDoc: str | r"""
:param ErrorLog: 具体的报错日志, 包含错误码和错误信息
:type ErrorLog: str
:param HelpDoc: 报错对应的帮助文档Ur
:type HelpDoc: str | [
"r",
":",
"param",
"ErrorLog",
":",
"具体的报错日志",
"包含错误码和错误信息",
":",
"type",
"ErrorLog",
":",
"str",
":",
"param",
"HelpDoc",
":",
"报错对应的帮助文档Ur",
":",
"type",
"HelpDoc",
":",
"str"
] | def __init__(self):
r"""
:param ErrorLog: 具体的报错日志, 包含错误码和错误信息
:type ErrorLog: str
:param HelpDoc: 报错对应的帮助文档Ur
:type HelpDoc: str
"""
self.ErrorLog = None
self.HelpDoc = None | [
"def",
"__init__",
"(",
"self",
")",
":",
"self",
".",
"ErrorLog",
"=",
"None",
"self",
".",
"HelpDoc",
"=",
"None"
] | https://github.com/TencentCloud/tencentcloud-sdk-python/blob/3677fd1cdc8c5fd626ce001c13fd3b59d1f279d2/tencentcloud/dts/v20180330/models.py#L960-L968 |
||
holzschu/Carnets | 44effb10ddfc6aa5c8b0687582a724ba82c6b547 | Library/lib/python3.7/site-packages/sympy/combinatorics/pc_groups.py | python | Collector.constructive_membership_test | (self, ipcgs, g) | return False | Return the exponent vector for induced pcgs. | Return the exponent vector for induced pcgs. | [
"Return",
"the",
"exponent",
"vector",
"for",
"induced",
"pcgs",
"."
] | def constructive_membership_test(self, ipcgs, g):
"""
Return the exponent vector for induced pcgs.
"""
e = [0]*len(ipcgs)
h = g
d = self.depth(h)
for i, gen in enumerate(ipcgs):
while self.depth(gen) == d:
f = self.leading_exponent(h)*self.leading_exponent(gen)
f = f % self.relative_order[d-1]
h = gen**(-f)*h
e[i] = f
d = self.depth(h)
if h == 1:
return e
return False | [
"def",
"constructive_membership_test",
"(",
"self",
",",
"ipcgs",
",",
"g",
")",
":",
"e",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"ipcgs",
")",
"h",
"=",
"g",
"d",
"=",
"self",
".",
"depth",
"(",
"h",
")",
"for",
"i",
",",
"gen",
"in",
"enumerate",
"(",
"ipcgs",
")",
":",
"while",
"self",
".",
"depth",
"(",
"gen",
")",
"==",
"d",
":",
"f",
"=",
"self",
".",
"leading_exponent",
"(",
"h",
")",
"*",
"self",
".",
"leading_exponent",
"(",
"gen",
")",
"f",
"=",
"f",
"%",
"self",
".",
"relative_order",
"[",
"d",
"-",
"1",
"]",
"h",
"=",
"gen",
"**",
"(",
"-",
"f",
")",
"*",
"h",
"e",
"[",
"i",
"]",
"=",
"f",
"d",
"=",
"self",
".",
"depth",
"(",
"h",
")",
"if",
"h",
"==",
"1",
":",
"return",
"e",
"return",
"False"
] | https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/site-packages/sympy/combinatorics/pc_groups.py#L656-L672 |
|
IJDykeman/wangTiles | 7c1ee2095ebdf7f72bce07d94c6484915d5cae8b | experimental_code/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py | python | HTTPConnectionPool.close | (self) | Close all pooled connections and disable the pool. | Close all pooled connections and disable the pool. | [
"Close",
"all",
"pooled",
"connections",
"and",
"disable",
"the",
"pool",
"."
] | def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except Empty:
pass | [
"def",
"close",
"(",
"self",
")",
":",
"# Disable access to the pool",
"old_pool",
",",
"self",
".",
"pool",
"=",
"self",
".",
"pool",
",",
"None",
"try",
":",
"while",
"True",
":",
"conn",
"=",
"old_pool",
".",
"get",
"(",
"block",
"=",
"False",
")",
"if",
"conn",
":",
"conn",
".",
"close",
"(",
")",
"except",
"Empty",
":",
"pass"
] | https://github.com/IJDykeman/wangTiles/blob/7c1ee2095ebdf7f72bce07d94c6484915d5cae8b/experimental_code/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/connectionpool.py#L367-L381 |
||
marinho/geraldo | 868ebdce67176d9b6205cddc92476f642c783fff | site/newsite/site-geraldo/django/utils/translation/trans_real.py | python | blankout | (src, char) | return dot_re.sub(char, src) | Changes every non-whitespace character to the given char.
Used in the templatize function. | Changes every non-whitespace character to the given char.
Used in the templatize function. | [
"Changes",
"every",
"non",
"-",
"whitespace",
"character",
"to",
"the",
"given",
"char",
".",
"Used",
"in",
"the",
"templatize",
"function",
"."
] | def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src) | [
"def",
"blankout",
"(",
"src",
",",
"char",
")",
":",
"return",
"dot_re",
".",
"sub",
"(",
"char",
",",
"src",
")"
] | https://github.com/marinho/geraldo/blob/868ebdce67176d9b6205cddc92476f642c783fff/site/newsite/site-geraldo/django/utils/translation/trans_real.py#L430-L435 |
|
beeware/ouroboros | a29123c6fab6a807caffbb7587cf548e0c370296 | ouroboros/logging/config.py | python | BaseConfigurator.resolve | (self, s) | Resolve strings to objects using standard import and attribute
syntax. | Resolve strings to objects using standard import and attribute
syntax. | [
"Resolve",
"strings",
"to",
"objects",
"using",
"standard",
"import",
"and",
"attribute",
"syntax",
"."
] | def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v | [
"def",
"resolve",
"(",
"self",
",",
"s",
")",
":",
"name",
"=",
"s",
".",
"split",
"(",
"'.'",
")",
"used",
"=",
"name",
".",
"pop",
"(",
"0",
")",
"try",
":",
"found",
"=",
"self",
".",
"importer",
"(",
"used",
")",
"for",
"frag",
"in",
"name",
":",
"used",
"+=",
"'.'",
"+",
"frag",
"try",
":",
"found",
"=",
"getattr",
"(",
"found",
",",
"frag",
")",
"except",
"AttributeError",
":",
"self",
".",
"importer",
"(",
"used",
")",
"found",
"=",
"getattr",
"(",
"found",
",",
"frag",
")",
"return",
"found",
"except",
"ImportError",
":",
"e",
",",
"tb",
"=",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
":",
"]",
"v",
"=",
"ValueError",
"(",
"'Cannot resolve %r: %s'",
"%",
"(",
"s",
",",
"e",
")",
")",
"v",
".",
"__cause__",
",",
"v",
".",
"__traceback__",
"=",
"e",
",",
"tb",
"raise",
"v"
] | https://github.com/beeware/ouroboros/blob/a29123c6fab6a807caffbb7587cf548e0c370296/ouroboros/logging/config.py#L369-L390 |
||
fluentpython/notebooks | 0f6e1e8d1686743dacd9281df7c5b5921812010a | attic/sequences/sentence_slice.py | python | SentenceSlice._handle_defaults | (self, position) | return start, stop + 1 | handle missing or overflow/underflow start/stop | handle missing or overflow/underflow start/stop | [
"handle",
"missing",
"or",
"overflow",
"/",
"underflow",
"start",
"/",
"stop"
] | def _handle_defaults(self, position):
"""handle missing or overflow/underflow start/stop"""
if position.start is None: # missing
start = 0
elif position.start >= len(self.word_index): # overflow
start = len(self.tokens)
else:
start = self.word_index[position.start]
if (position.stop is None # missing
or position.stop > len(self.word_index)): # overflow
stop = self.word_index[-1]
else:
stop = self.word_index[position.stop-1]
return start, stop + 1 | [
"def",
"_handle_defaults",
"(",
"self",
",",
"position",
")",
":",
"if",
"position",
".",
"start",
"is",
"None",
":",
"# missing",
"start",
"=",
"0",
"elif",
"position",
".",
"start",
">=",
"len",
"(",
"self",
".",
"word_index",
")",
":",
"# overflow",
"start",
"=",
"len",
"(",
"self",
".",
"tokens",
")",
"else",
":",
"start",
"=",
"self",
".",
"word_index",
"[",
"position",
".",
"start",
"]",
"if",
"(",
"position",
".",
"stop",
"is",
"None",
"# missing",
"or",
"position",
".",
"stop",
">",
"len",
"(",
"self",
".",
"word_index",
")",
")",
":",
"# overflow",
"stop",
"=",
"self",
".",
"word_index",
"[",
"-",
"1",
"]",
"else",
":",
"stop",
"=",
"self",
".",
"word_index",
"[",
"position",
".",
"stop",
"-",
"1",
"]",
"return",
"start",
",",
"stop",
"+",
"1"
] | https://github.com/fluentpython/notebooks/blob/0f6e1e8d1686743dacd9281df7c5b5921812010a/attic/sequences/sentence_slice.py#L41-L54 |
|
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | ansible/roles/lib_oa_openshift/library/oc_user.py | python | Utils.openshift_installed | () | return rpmquery.count() > 0 | check if openshift is installed | check if openshift is installed | [
"check",
"if",
"openshift",
"is",
"installed"
] | def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0 | [
"def",
"openshift_installed",
"(",
")",
":",
"import",
"rpm",
"transaction_set",
"=",
"rpm",
".",
"TransactionSet",
"(",
")",
"rpmquery",
"=",
"transaction_set",
".",
"dbMatch",
"(",
"\"name\"",
",",
"\"atomic-openshift\"",
")",
"return",
"rpmquery",
".",
"count",
"(",
")",
">",
"0"
] | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/ansible/roles/lib_oa_openshift/library/oc_user.py#L1372-L1379 |
|
joosthoeks/jhTAlib | 4931a34829d966ccc973fb29d767a359d6e94b44 | jhtalib/behavioral_techniques/behavioral_techniques.py | python | ATL | (df, price='Low') | return atl_dict | All Time Low
Returns: dict of lists of floats = jhta.ATL(df, price='Low') | All Time Low
Returns: dict of lists of floats = jhta.ATL(df, price='Low') | [
"All",
"Time",
"Low",
"Returns",
":",
"dict",
"of",
"lists",
"of",
"floats",
"=",
"jhta",
".",
"ATL",
"(",
"df",
"price",
"=",
"Low",
")"
] | def ATL(df, price='Low'):
"""
All Time Low
Returns: dict of lists of floats = jhta.ATL(df, price='Low')
"""
atl_dict = {'atl': [], 'atl_index': []}
for i in range(len(df[price])):
df_part_list = df[price][0:i+1]
atl = min(df_part_list)
atl_dict['atl'].append(atl)
atl_index = df_part_list.index(min(df_part_list))
atl_dict['atl_index'].append(atl_index)
return atl_dict | [
"def",
"ATL",
"(",
"df",
",",
"price",
"=",
"'Low'",
")",
":",
"atl_dict",
"=",
"{",
"'atl'",
":",
"[",
"]",
",",
"'atl_index'",
":",
"[",
"]",
"}",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"df",
"[",
"price",
"]",
")",
")",
":",
"df_part_list",
"=",
"df",
"[",
"price",
"]",
"[",
"0",
":",
"i",
"+",
"1",
"]",
"atl",
"=",
"min",
"(",
"df_part_list",
")",
"atl_dict",
"[",
"'atl'",
"]",
".",
"append",
"(",
"atl",
")",
"atl_index",
"=",
"df_part_list",
".",
"index",
"(",
"min",
"(",
"df_part_list",
")",
")",
"atl_dict",
"[",
"'atl_index'",
"]",
".",
"append",
"(",
"atl_index",
")",
"return",
"atl_dict"
] | https://github.com/joosthoeks/jhTAlib/blob/4931a34829d966ccc973fb29d767a359d6e94b44/jhtalib/behavioral_techniques/behavioral_techniques.py#L25-L37 |
|
shidenggui/easytrader | dbb166564c6c73da3446588a19d2692ad52716cb | easytrader/joinquant_follower.py | python | JoinQuantFollower.follow | (
self,
users,
strategies,
track_interval=1,
trade_cmd_expire_seconds=120,
cmd_cache=True,
entrust_prop="limit",
send_interval=0,
) | 跟踪joinquant对应的模拟交易,支持多用户多策略
:param users: 支持easytrader的用户对象,支持使用 [] 指定多个用户
:param strategies: joinquant 的模拟交易地址,支持使用 [] 指定多个模拟交易,
地址类似 https://www.joinquant.com/algorithm/live/index?backtestId=xxx
:param track_interval: 轮训模拟交易时间,单位为秒
:param trade_cmd_expire_seconds: 交易指令过期时间, 单位为秒
:param cmd_cache: 是否读取存储历史执行过的指令,防止重启时重复执行已经交易过的指令
:param entrust_prop: 委托方式, 'limit' 为限价,'market' 为市价, 仅在银河实现
:param send_interval: 交易发送间隔, 默认为0s。调大可防止卖出买入时卖出单没有及时成交导致的买入金额不足 | 跟踪joinquant对应的模拟交易,支持多用户多策略
:param users: 支持easytrader的用户对象,支持使用 [] 指定多个用户
:param strategies: joinquant 的模拟交易地址,支持使用 [] 指定多个模拟交易,
地址类似 https://www.joinquant.com/algorithm/live/index?backtestId=xxx
:param track_interval: 轮训模拟交易时间,单位为秒
:param trade_cmd_expire_seconds: 交易指令过期时间, 单位为秒
:param cmd_cache: 是否读取存储历史执行过的指令,防止重启时重复执行已经交易过的指令
:param entrust_prop: 委托方式, 'limit' 为限价,'market' 为市价, 仅在银河实现
:param send_interval: 交易发送间隔, 默认为0s。调大可防止卖出买入时卖出单没有及时成交导致的买入金额不足 | [
"跟踪joinquant对应的模拟交易,支持多用户多策略",
":",
"param",
"users",
":",
"支持easytrader的用户对象,支持使用",
"[]",
"指定多个用户",
":",
"param",
"strategies",
":",
"joinquant",
"的模拟交易地址,支持使用",
"[]",
"指定多个模拟交易",
"地址类似",
"https",
":",
"//",
"www",
".",
"joinquant",
".",
"com",
"/",
"algorithm",
"/",
"live",
"/",
"index?backtestId",
"=",
"xxx",
":",
"param",
"track_interval",
":",
"轮训模拟交易时间,单位为秒",
":",
"param",
"trade_cmd_expire_seconds",
":",
"交易指令过期时间",
"单位为秒",
":",
"param",
"cmd_cache",
":",
"是否读取存储历史执行过的指令,防止重启时重复执行已经交易过的指令",
":",
"param",
"entrust_prop",
":",
"委托方式",
"limit",
"为限价,",
"market",
"为市价",
"仅在银河实现",
":",
"param",
"send_interval",
":",
"交易发送间隔,",
"默认为0s。调大可防止卖出买入时卖出单没有及时成交导致的买入金额不足"
] | def follow(
self,
users,
strategies,
track_interval=1,
trade_cmd_expire_seconds=120,
cmd_cache=True,
entrust_prop="limit",
send_interval=0,
):
"""跟踪joinquant对应的模拟交易,支持多用户多策略
:param users: 支持easytrader的用户对象,支持使用 [] 指定多个用户
:param strategies: joinquant 的模拟交易地址,支持使用 [] 指定多个模拟交易,
地址类似 https://www.joinquant.com/algorithm/live/index?backtestId=xxx
:param track_interval: 轮训模拟交易时间,单位为秒
:param trade_cmd_expire_seconds: 交易指令过期时间, 单位为秒
:param cmd_cache: 是否读取存储历史执行过的指令,防止重启时重复执行已经交易过的指令
:param entrust_prop: 委托方式, 'limit' 为限价,'market' 为市价, 仅在银河实现
:param send_interval: 交易发送间隔, 默认为0s。调大可防止卖出买入时卖出单没有及时成交导致的买入金额不足
"""
users = self.warp_list(users)
strategies = self.warp_list(strategies)
if cmd_cache:
self.load_expired_cmd_cache()
self.start_trader_thread(
users, trade_cmd_expire_seconds, entrust_prop, send_interval
)
workers = []
for strategy_url in strategies:
try:
strategy_id = self.extract_strategy_id(strategy_url)
strategy_name = self.extract_strategy_name(strategy_url)
except:
logger.error("抽取交易id和策略名失败, 无效的模拟交易url: %s", strategy_url)
raise
strategy_worker = Thread(
target=self.track_strategy_worker,
args=[strategy_id, strategy_name],
kwargs={"interval": track_interval},
)
strategy_worker.start()
workers.append(strategy_worker)
logger.info("开始跟踪策略: %s", strategy_name)
for worker in workers:
worker.join() | [
"def",
"follow",
"(",
"self",
",",
"users",
",",
"strategies",
",",
"track_interval",
"=",
"1",
",",
"trade_cmd_expire_seconds",
"=",
"120",
",",
"cmd_cache",
"=",
"True",
",",
"entrust_prop",
"=",
"\"limit\"",
",",
"send_interval",
"=",
"0",
",",
")",
":",
"users",
"=",
"self",
".",
"warp_list",
"(",
"users",
")",
"strategies",
"=",
"self",
".",
"warp_list",
"(",
"strategies",
")",
"if",
"cmd_cache",
":",
"self",
".",
"load_expired_cmd_cache",
"(",
")",
"self",
".",
"start_trader_thread",
"(",
"users",
",",
"trade_cmd_expire_seconds",
",",
"entrust_prop",
",",
"send_interval",
")",
"workers",
"=",
"[",
"]",
"for",
"strategy_url",
"in",
"strategies",
":",
"try",
":",
"strategy_id",
"=",
"self",
".",
"extract_strategy_id",
"(",
"strategy_url",
")",
"strategy_name",
"=",
"self",
".",
"extract_strategy_name",
"(",
"strategy_url",
")",
"except",
":",
"logger",
".",
"error",
"(",
"\"抽取交易id和策略名失败, 无效的模拟交易url: %s\", strategy_url)",
"",
"",
"",
"raise",
"strategy_worker",
"=",
"Thread",
"(",
"target",
"=",
"self",
".",
"track_strategy_worker",
",",
"args",
"=",
"[",
"strategy_id",
",",
"strategy_name",
"]",
",",
"kwargs",
"=",
"{",
"\"interval\"",
":",
"track_interval",
"}",
",",
")",
"strategy_worker",
".",
"start",
"(",
")",
"workers",
".",
"append",
"(",
"strategy_worker",
")",
"logger",
".",
"info",
"(",
"\"开始跟踪策略: %s\", strategy_n",
"a",
"e)",
"",
"for",
"worker",
"in",
"workers",
":",
"worker",
".",
"join",
"(",
")"
] | https://github.com/shidenggui/easytrader/blob/dbb166564c6c73da3446588a19d2692ad52716cb/easytrader/joinquant_follower.py#L33-L80 |
||
mvanveen/hncrawl | 1ff4d3aa945db37f0840916b38ea7c5f58c1c017 | news/spiders/beautiful_soup.py | python | Tag.__str__ | (self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0) | return s | Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string. | Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding. | [
"Returns",
"a",
"string",
"or",
"Unicode",
"representation",
"of",
"this",
"tag",
"and",
"its",
"contents",
".",
"To",
"get",
"Unicode",
"pass",
"None",
"for",
"encoding",
"."
] | def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s | [
"def",
"__str__",
"(",
"self",
",",
"encoding",
"=",
"DEFAULT_OUTPUT_ENCODING",
",",
"prettyPrint",
"=",
"False",
",",
"indentLevel",
"=",
"0",
")",
":",
"encodedName",
"=",
"self",
".",
"toEncoding",
"(",
"self",
".",
"name",
",",
"encoding",
")",
"attrs",
"=",
"[",
"]",
"if",
"self",
".",
"attrs",
":",
"for",
"key",
",",
"val",
"in",
"self",
".",
"attrs",
":",
"fmt",
"=",
"'%s=\"%s\"'",
"if",
"isinstance",
"(",
"val",
",",
"basestring",
")",
":",
"if",
"self",
".",
"containsSubstitutions",
"and",
"'%SOUP-ENCODING%'",
"in",
"val",
":",
"val",
"=",
"self",
".",
"substituteEncoding",
"(",
"val",
",",
"encoding",
")",
"# The attribute value either:",
"#",
"# * Contains no embedded double quotes or single quotes.",
"# No problem: we enclose it in double quotes.",
"# * Contains embedded single quotes. No problem:",
"# double quotes work here too.",
"# * Contains embedded double quotes. No problem:",
"# we enclose it in single quotes.",
"# * Embeds both single _and_ double quotes. This",
"# can't happen naturally, but it can happen if",
"# you modify an attribute value after parsing",
"# the document. Now we have a bit of a",
"# problem. We solve it by enclosing the",
"# attribute in single quotes, and escaping any",
"# embedded single quotes to XML entities.",
"if",
"'\"'",
"in",
"val",
":",
"fmt",
"=",
"\"%s='%s'\"",
"if",
"\"'\"",
"in",
"val",
":",
"# TODO: replace with apos when",
"# appropriate.",
"val",
"=",
"val",
".",
"replace",
"(",
"\"'\"",
",",
"\"&squot;\"",
")",
"# Now we're okay w/r/t quotes. But the attribute",
"# value might also contain angle brackets, or",
"# ampersands that aren't part of entities. We need",
"# to escape those to XML entities too.",
"val",
"=",
"self",
".",
"BARE_AMPERSAND_OR_BRACKET",
".",
"sub",
"(",
"self",
".",
"_sub_entity",
",",
"val",
")",
"attrs",
".",
"append",
"(",
"fmt",
"%",
"(",
"self",
".",
"toEncoding",
"(",
"key",
",",
"encoding",
")",
",",
"self",
".",
"toEncoding",
"(",
"val",
",",
"encoding",
")",
")",
")",
"close",
"=",
"''",
"closeTag",
"=",
"''",
"if",
"self",
".",
"isSelfClosing",
":",
"close",
"=",
"' /'",
"else",
":",
"closeTag",
"=",
"'</%s>'",
"%",
"encodedName",
"indentTag",
",",
"indentContents",
"=",
"0",
",",
"0",
"if",
"prettyPrint",
":",
"indentTag",
"=",
"indentLevel",
"space",
"=",
"(",
"' '",
"*",
"(",
"indentTag",
"-",
"1",
")",
")",
"indentContents",
"=",
"indentTag",
"+",
"1",
"contents",
"=",
"self",
".",
"renderContents",
"(",
"encoding",
",",
"prettyPrint",
",",
"indentContents",
")",
"if",
"self",
".",
"hidden",
":",
"s",
"=",
"contents",
"else",
":",
"s",
"=",
"[",
"]",
"attributeString",
"=",
"''",
"if",
"attrs",
":",
"attributeString",
"=",
"' '",
"+",
"' '",
".",
"join",
"(",
"attrs",
")",
"if",
"prettyPrint",
":",
"s",
".",
"append",
"(",
"space",
")",
"s",
".",
"append",
"(",
"'<%s%s%s>'",
"%",
"(",
"encodedName",
",",
"attributeString",
",",
"close",
")",
")",
"if",
"prettyPrint",
":",
"s",
".",
"append",
"(",
"\"\\n\"",
")",
"s",
".",
"append",
"(",
"contents",
")",
"if",
"prettyPrint",
"and",
"contents",
"and",
"contents",
"[",
"-",
"1",
"]",
"!=",
"\"\\n\"",
":",
"s",
".",
"append",
"(",
"\"\\n\"",
")",
"if",
"prettyPrint",
"and",
"closeTag",
":",
"s",
".",
"append",
"(",
"space",
")",
"s",
".",
"append",
"(",
"closeTag",
")",
"if",
"prettyPrint",
"and",
"closeTag",
"and",
"self",
".",
"nextSibling",
":",
"s",
".",
"append",
"(",
"\"\\n\"",
")",
"s",
"=",
"''",
".",
"join",
"(",
"s",
")",
"return",
"s"
] | https://github.com/mvanveen/hncrawl/blob/1ff4d3aa945db37f0840916b38ea7c5f58c1c017/news/spiders/beautiful_soup.py#L693-L776 |
|
django-oscar/django-oscar | ffcc530844d40283b6b1552778a140536b904f5f | src/oscar/apps/dashboard/menu.py | python | get_nodes | (user) | return visible_nodes | Return the visible navigation nodes for the passed user | Return the visible navigation nodes for the passed user | [
"Return",
"the",
"visible",
"navigation",
"nodes",
"for",
"the",
"passed",
"user"
] | def get_nodes(user):
"""
Return the visible navigation nodes for the passed user
"""
all_nodes = create_menu(settings.OSCAR_DASHBOARD_NAVIGATION)
visible_nodes = []
for node in all_nodes:
filtered_node = node.filter(user)
# don't append headings without children
if filtered_node and (filtered_node.has_children()
or not filtered_node.is_heading):
visible_nodes.append(filtered_node)
return visible_nodes | [
"def",
"get_nodes",
"(",
"user",
")",
":",
"all_nodes",
"=",
"create_menu",
"(",
"settings",
".",
"OSCAR_DASHBOARD_NAVIGATION",
")",
"visible_nodes",
"=",
"[",
"]",
"for",
"node",
"in",
"all_nodes",
":",
"filtered_node",
"=",
"node",
".",
"filter",
"(",
"user",
")",
"# don't append headings without children",
"if",
"filtered_node",
"and",
"(",
"filtered_node",
".",
"has_children",
"(",
")",
"or",
"not",
"filtered_node",
".",
"is_heading",
")",
":",
"visible_nodes",
".",
"append",
"(",
"filtered_node",
")",
"return",
"visible_nodes"
] | https://github.com/django-oscar/django-oscar/blob/ffcc530844d40283b6b1552778a140536b904f5f/src/oscar/apps/dashboard/menu.py#L10-L22 |
|
caiiiac/Machine-Learning-with-Python | 1a26c4467da41ca4ebc3d5bd789ea942ef79422f | MachineLearning/venv/lib/python3.5/site-packages/pyparsing.py | python | ParserElement.__rsub__ | (self, other ) | return other - self | Implementation of - operator when left operand is not a C{L{ParserElement}} | Implementation of - operator when left operand is not a C{L{ParserElement}} | [
"Implementation",
"of",
"-",
"operator",
"when",
"left",
"operand",
"is",
"not",
"a",
"C",
"{",
"L",
"{",
"ParserElement",
"}}"
] | def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self | [
"def",
"__rsub__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"basestring",
")",
":",
"other",
"=",
"ParserElement",
".",
"_literalStringClass",
"(",
"other",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"ParserElement",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Cannot combine element of type %s with ParserElement\"",
"%",
"type",
"(",
"other",
")",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"None",
"return",
"other",
"-",
"self"
] | https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/pyparsing.py#L1843-L1853 |
|
henkelis/sonospy | 841f52010fd6e1e932d8f1a8896ad4e5a0667b8a | sonospy/control_point_sonos.py | python | ControlPointSonos.search | (self, container_id, search_criteria, filter, starting_index,
requested_count, sort_criteria, device=None) | return search_response | Search items in Media Server.
This method search items with search_criteria key in the container_id
of current media server.
@param container_id: unique identifier of the container in which
to begin searching.
@param search_criteria: search criteria
@param filter: a filter to indicate which metadata properties
are to be returned.
@param starting_index: starting index to consider the requested
count
@param requested_count: requested number of entries under the
object specified by container_id
@param sort_criteria: sorting criteria
@type container_id: string
@type search_criteria: string
@type filter: string
@type starting_index: int
@type requested_count: int
@type sort_criteria: string
@return: search result
@rtype: dict | Search items in Media Server. | [
"Search",
"items",
"in",
"Media",
"Server",
"."
] | def search(self, container_id, search_criteria, filter, starting_index,
requested_count, sort_criteria, device=None):
""" Search items in Media Server.
This method search items with search_criteria key in the container_id
of current media server.
@param container_id: unique identifier of the container in which
to begin searching.
@param search_criteria: search criteria
@param filter: a filter to indicate which metadata properties
are to be returned.
@param starting_index: starting index to consider the requested
count
@param requested_count: requested number of entries under the
object specified by container_id
@param sort_criteria: sorting criteria
@type container_id: string
@type search_criteria: string
@type filter: string
@type starting_index: int
@type requested_count: int
@type sort_criteria: string
@return: search result
@rtype: dict
"""
service = self.get_cd_service(device)
search_response = service.Search(ContainerID=container_id,
SearchCriteria=search_criteria,
Filter=filter,
StartingIndex=starting_index,
RequestedCount=requested_count,
SortCriteria=sort_criteria)
# elt = Element.from_string(search_response['Result'])
# return elt.get_items()
if 'Result' in search_response:
items = ElementItem().from_string(search_response['Result'])
search_response['Result'] = []
for item in items:
# get the class of the item
class_name = find(item, 'upnp', 'class').text
# for certain classes parse the attributes into a Sonos object to capture the extended elements
# TODO: decide whether to patch the BRisa classes
elt = None
if class_name == 'object.item.audioItem.audioBroadcast':
elt = SonosAudioBroadcast()
elif class_name == 'object.item.audioItem.musicTrack':
elt = SonosMusicTrack()
elif class_name == 'object.item.audioItem.musicTrack.recentShow':
elt = SonosMusicTrackShow()
elif class_name == 'object.item':
elt = SonosItem()
else:
try:
name = class_name.split('.')[-1]
class_name = "%s%s" % (name[0].upper(), name[1:])
upnp_class = eval(class_name)
elt = upnp_class()
except Exception:
log.debug('Unknown upnp class in media server search')
if elt != None:
elt.from_element(item)
search_response['Result'].append(elt)
return search_response | [
"def",
"search",
"(",
"self",
",",
"container_id",
",",
"search_criteria",
",",
"filter",
",",
"starting_index",
",",
"requested_count",
",",
"sort_criteria",
",",
"device",
"=",
"None",
")",
":",
"service",
"=",
"self",
".",
"get_cd_service",
"(",
"device",
")",
"search_response",
"=",
"service",
".",
"Search",
"(",
"ContainerID",
"=",
"container_id",
",",
"SearchCriteria",
"=",
"search_criteria",
",",
"Filter",
"=",
"filter",
",",
"StartingIndex",
"=",
"starting_index",
",",
"RequestedCount",
"=",
"requested_count",
",",
"SortCriteria",
"=",
"sort_criteria",
")",
"# elt = Element.from_string(search_response['Result'])",
"# return elt.get_items()",
"if",
"'Result'",
"in",
"search_response",
":",
"items",
"=",
"ElementItem",
"(",
")",
".",
"from_string",
"(",
"search_response",
"[",
"'Result'",
"]",
")",
"search_response",
"[",
"'Result'",
"]",
"=",
"[",
"]",
"for",
"item",
"in",
"items",
":",
"# get the class of the item",
"class_name",
"=",
"find",
"(",
"item",
",",
"'upnp'",
",",
"'class'",
")",
".",
"text",
"# for certain classes parse the attributes into a Sonos object to capture the extended elements",
"# TODO: decide whether to patch the BRisa classes",
"elt",
"=",
"None",
"if",
"class_name",
"==",
"'object.item.audioItem.audioBroadcast'",
":",
"elt",
"=",
"SonosAudioBroadcast",
"(",
")",
"elif",
"class_name",
"==",
"'object.item.audioItem.musicTrack'",
":",
"elt",
"=",
"SonosMusicTrack",
"(",
")",
"elif",
"class_name",
"==",
"'object.item.audioItem.musicTrack.recentShow'",
":",
"elt",
"=",
"SonosMusicTrackShow",
"(",
")",
"elif",
"class_name",
"==",
"'object.item'",
":",
"elt",
"=",
"SonosItem",
"(",
")",
"else",
":",
"try",
":",
"name",
"=",
"class_name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"class_name",
"=",
"\"%s%s\"",
"%",
"(",
"name",
"[",
"0",
"]",
".",
"upper",
"(",
")",
",",
"name",
"[",
"1",
":",
"]",
")",
"upnp_class",
"=",
"eval",
"(",
"class_name",
")",
"elt",
"=",
"upnp_class",
"(",
")",
"except",
"Exception",
":",
"log",
".",
"debug",
"(",
"'Unknown upnp class in media server search'",
")",
"if",
"elt",
"!=",
"None",
":",
"elt",
".",
"from_element",
"(",
"item",
")",
"search_response",
"[",
"'Result'",
"]",
".",
"append",
"(",
"elt",
")",
"return",
"search_response"
] | https://github.com/henkelis/sonospy/blob/841f52010fd6e1e932d8f1a8896ad4e5a0667b8a/sonospy/control_point_sonos.py#L581-L650 |
|
home-assistant/core | 265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1 | homeassistant/components/proxy/camera.py | python | ImageOpts.__bool__ | (self) | return bool(self.max_width or self.quality) | Bool evaluation rules. | Bool evaluation rules. | [
"Bool",
"evaluation",
"rules",
"."
] | def __bool__(self):
"""Bool evaluation rules."""
return bool(self.max_width or self.quality) | [
"def",
"__bool__",
"(",
"self",
")",
":",
"return",
"bool",
"(",
"self",
".",
"max_width",
"or",
"self",
".",
"quality",
")"
] | https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/proxy/camera.py#L190-L192 |
|
ywangd/stash | 773d15b8fb3853a65c15fe160bf5584c99437170 | system/shstreams.py | python | ShStream.dispatch | (self, event, *args, **kwargs) | Dispatches an event.
If any of the attached listeners throws an exception, the
subsequent callbacks are aborted.
:param str event: event to dispatch.
:param list args: arguments to pass to event handlers. | Dispatches an event. | [
"Dispatches",
"an",
"event",
"."
] | def dispatch(self, event, *args, **kwargs):
"""Dispatches an event.
If any of the attached listeners throws an exception, the
subsequent callbacks are aborted.
:param str event: event to dispatch.
:param list args: arguments to pass to event handlers.
"""
# noinspection PyCallingNonCallable
try:
handler = getattr(self.main_screen, event)
handler(*args)
except AttributeError:
pass
if kwargs.get('reset', True):
self.reset() | [
"def",
"dispatch",
"(",
"self",
",",
"event",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# noinspection PyCallingNonCallable",
"try",
":",
"handler",
"=",
"getattr",
"(",
"self",
".",
"main_screen",
",",
"event",
")",
"handler",
"(",
"*",
"args",
")",
"except",
"AttributeError",
":",
"pass",
"if",
"kwargs",
".",
"get",
"(",
"'reset'",
",",
"True",
")",
":",
"self",
".",
"reset",
"(",
")"
] | https://github.com/ywangd/stash/blob/773d15b8fb3853a65c15fe160bf5584c99437170/system/shstreams.py#L391-L409 |
||
schodet/nxt-python | b434303f098d13677bceb664810f03e8c5057c82 | nxt/sensor/hitechnic.py | python | Compass.is_in_range | (self,minval,maxval) | return bool(inverted) ^ bool(in_range) | This deserves a little explanation:
if max > min, it's straightforward, but
if min > max, it switches the values of max and min
and returns true if heading is NOT between the new max and min | This deserves a little explanation:
if max > min, it's straightforward, but
if min > max, it switches the values of max and min
and returns true if heading is NOT between the new max and min | [
"This",
"deserves",
"a",
"little",
"explanation",
":",
"if",
"max",
">",
"min",
"it",
"s",
"straightforward",
"but",
"if",
"min",
">",
"max",
"it",
"switches",
"the",
"values",
"of",
"max",
"and",
"min",
"and",
"returns",
"true",
"if",
"heading",
"is",
"NOT",
"between",
"the",
"new",
"max",
"and",
"min"
] | def is_in_range(self,minval,maxval):
"""This deserves a little explanation:
if max > min, it's straightforward, but
if min > max, it switches the values of max and min
and returns true if heading is NOT between the new max and min
"""
if minval > maxval:
(maxval,minval) = (minval,maxval)
inverted = True
else:
inverted = False
heading = self.get_sample()
in_range = (heading > minval) and (heading < maxval)
#an xor handles the reversal
#a faster, more compact way of saying
#if !reversed return in_range
#if reversed return !in_range
return bool(inverted) ^ bool(in_range) | [
"def",
"is_in_range",
"(",
"self",
",",
"minval",
",",
"maxval",
")",
":",
"if",
"minval",
">",
"maxval",
":",
"(",
"maxval",
",",
"minval",
")",
"=",
"(",
"minval",
",",
"maxval",
")",
"inverted",
"=",
"True",
"else",
":",
"inverted",
"=",
"False",
"heading",
"=",
"self",
".",
"get_sample",
"(",
")",
"in_range",
"=",
"(",
"heading",
">",
"minval",
")",
"and",
"(",
"heading",
"<",
"maxval",
")",
"#an xor handles the reversal",
"#a faster, more compact way of saying",
"#if !reversed return in_range",
"#if reversed return !in_range",
"return",
"bool",
"(",
"inverted",
")",
"^",
"bool",
"(",
"in_range",
")"
] | https://github.com/schodet/nxt-python/blob/b434303f098d13677bceb664810f03e8c5057c82/nxt/sensor/hitechnic.py#L54-L71 |
|
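Editor's note: the swap-and-XOR trick in `Compass.is_in_range` above handles headings whose valid arc wraps around 0/360 degrees. The same logic as a pure function, with the heading passed in instead of sampled from the sensor:

def heading_in_range(heading, minval, maxval):
    # Reversed bounds mean the arc that wraps around zero:
    # swap them and invert the result of the ordinary test.
    inverted = minval > maxval
    if inverted:
        minval, maxval = maxval, minval
    in_range = minval < heading < maxval
    return inverted ^ in_range

assert heading_in_range(90, 45, 135) is True    # ordinary interval
assert heading_in_range(0, 350, 10) is True     # arc wrapping through north
assert heading_in_range(180, 350, 10) is False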
pypa/setuptools | 9f37366aab9cd8f6baa23e6a77cfdb8daf97757e | setuptools/_distutils/_msvccompiler.py | python | MSVCCompiler.library_dir_option | (self, dir) | return "/LIBPATH:" + dir | [] | def library_dir_option(self, dir):
return "/LIBPATH:" + dir | [
"def",
"library_dir_option",
"(",
"self",
",",
"dir",
")",
":",
"return",
"\"/LIBPATH:\"",
"+",
"dir"
] | https://github.com/pypa/setuptools/blob/9f37366aab9cd8f6baa23e6a77cfdb8daf97757e/setuptools/_distutils/_msvccompiler.py#L537-L538 |
|||
tp4a/teleport | 1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad | server/www/packages/packages-linux/x64/mako/ext/autohandler.py | python | _file_exists | (lookup, path) | [] | def _file_exists(lookup, path):
psub = re.sub(r"^/", "", path)
for d in lookup.directories:
if os.path.exists(d + "/" + psub):
return True
else:
return False | [
"def",
"_file_exists",
"(",
"lookup",
",",
"path",
")",
":",
"psub",
"=",
"re",
".",
"sub",
"(",
"r\"^/\"",
",",
"\"\"",
",",
"path",
")",
"for",
"d",
"in",
"lookup",
".",
"directories",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"d",
"+",
"\"/\"",
"+",
"psub",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | https://github.com/tp4a/teleport/blob/1fafd34f1f775d2cf80ea4af6e44468d8e0b24ad/server/www/packages/packages-linux/x64/mako/ext/autohandler.py#L64-L70 |
||||
igraph/python-igraph | e9f83e8af08f24ea025596e745917197d8b44d94 | src/igraph/datatypes.py | python | Matrix.Fill | (cls, value, *args) | return cls(mtrx) | Creates a matrix filled with the given value
@param value: the value to be used
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped. | Creates a matrix filled with the given value | [
"Creates",
"a",
"matrix",
"filled",
"with",
"the",
"given",
"value"
] | def Fill(cls, value, *args):
"""Creates a matrix filled with the given value
@param value: the value to be used
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
if len(args) < 1:
raise TypeError("expected an integer or a tuple")
if len(args) == 1:
if hasattr(args[0], "__len__"):
height, width = int(args[0][0]), int(args[0][1])
else:
height, width = int(args[0]), int(args[0])
else:
height, width = int(args[0]), int(args[1])
mtrx = [[value] * width for _ in range(height)]
return cls(mtrx) | [
"def",
"Fill",
"(",
"cls",
",",
"value",
",",
"*",
"args",
")",
":",
"if",
"len",
"(",
"args",
")",
"<",
"1",
":",
"raise",
"TypeError",
"(",
"\"expected an integer or a tuple\"",
")",
"if",
"len",
"(",
"args",
")",
"==",
"1",
":",
"if",
"hasattr",
"(",
"args",
"[",
"0",
"]",
",",
"\"__len__\"",
")",
":",
"height",
",",
"width",
"=",
"int",
"(",
"args",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"int",
"(",
"args",
"[",
"0",
"]",
"[",
"1",
"]",
")",
"else",
":",
"height",
",",
"width",
"=",
"int",
"(",
"args",
"[",
"0",
"]",
")",
",",
"int",
"(",
"args",
"[",
"0",
"]",
")",
"else",
":",
"height",
",",
"width",
"=",
"int",
"(",
"args",
"[",
"0",
"]",
")",
",",
"int",
"(",
"args",
"[",
"1",
"]",
")",
"mtrx",
"=",
"[",
"[",
"value",
"]",
"*",
"width",
"for",
"_",
"in",
"range",
"(",
"height",
")",
"]",
"return",
"cls",
"(",
"mtrx",
")"
] | https://github.com/igraph/python-igraph/blob/e9f83e8af08f24ea025596e745917197d8b44d94/src/igraph/datatypes.py#L29-L47 |
|
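Editor's note: given the shape handling visible in `Matrix.Fill` above, usage looks like the sketch below. This assumes python-igraph is installed and that `Matrix` is importable from `igraph.datatypes`, as the record's path suggests:

from igraph.datatypes import Matrix

m1 = Matrix.Fill(0, 3)        # one integer            -> 3x3 square matrix of zeros
m2 = Matrix.Fill(1, (2, 4))   # a (height,width) tuple -> 2x4 matrix of ones
m3 = Matrix.Fill(7, 2, 5)     # two integers           -> 2x5 matrix of sevens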
doorstop-dev/doorstop | 03aa287e5069e29da6979274e1cb6714ee450d3a | doorstop/server/main.py | python | main | (args=None) | Process command-line arguments and run the program. | Process command-line arguments and run the program. | [
"Process",
"command",
"-",
"line",
"arguments",
"and",
"run",
"the",
"program",
"."
] | def main(args=None):
"""Process command-line arguments and run the program."""
from doorstop import SERVER, VERSION
# Shared options
debug = argparse.ArgumentParser(add_help=False)
debug.add_argument('-V', '--version', action='version', version=VERSION)
debug.add_argument(
'--debug', action='store_true', help="run the server in debug mode"
)
debug.add_argument(
'--launch', action='store_true', help="open the server UI in a browser"
)
shared = {'formatter_class': HelpFormatter, 'parents': [debug]}
# Build main parser
parser = argparse.ArgumentParser(prog=SERVER, description=__doc__, **shared) # type: ignore
cwd = os.getcwd()
parser.add_argument(
'-j', '--project', default=None, help="path to the root of the project"
)
parser.add_argument(
'-P',
'--port',
metavar='NUM',
type=int,
default=settings.SERVER_PORT,
help="use a custom port for the server",
)
parser.add_argument(
'-H', '--host', default='127.0.0.1', help="IP address to listen"
)
parser.add_argument(
'-w', '--wsgi', action='store_true', help="Run as a WSGI process"
)
parser.add_argument(
'-b',
'--baseurl',
default='',
help="Base URL this is served at (Usually only necessary for WSGI)",
)
# Parse arguments
args = parser.parse_args(args=args)
if args.project is None:
args.project = vcs.find_root(cwd)
# Configure logging
logging.basicConfig(
format=settings.VERBOSE_LOGGING_FORMAT, level=settings.VERBOSE_LOGGING_LEVEL
)
# Run the program
run(args, os.getcwd(), parser.error) | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"from",
"doorstop",
"import",
"SERVER",
",",
"VERSION",
"# Shared options",
"debug",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"add_help",
"=",
"False",
")",
"debug",
".",
"add_argument",
"(",
"'-V'",
",",
"'--version'",
",",
"action",
"=",
"'version'",
",",
"version",
"=",
"VERSION",
")",
"debug",
".",
"add_argument",
"(",
"'--debug'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"run the server in debug mode\"",
")",
"debug",
".",
"add_argument",
"(",
"'--launch'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"open the server UI in a browser\"",
")",
"shared",
"=",
"{",
"'formatter_class'",
":",
"HelpFormatter",
",",
"'parents'",
":",
"[",
"debug",
"]",
"}",
"# Build main parser",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"prog",
"=",
"SERVER",
",",
"description",
"=",
"__doc__",
",",
"*",
"*",
"shared",
")",
"# type: ignore",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'-j'",
",",
"'--project'",
",",
"default",
"=",
"None",
",",
"help",
"=",
"\"path to the root of the project\"",
")",
"parser",
".",
"add_argument",
"(",
"'-P'",
",",
"'--port'",
",",
"metavar",
"=",
"'NUM'",
",",
"type",
"=",
"int",
",",
"default",
"=",
"settings",
".",
"SERVER_PORT",
",",
"help",
"=",
"\"use a custom port for the server\"",
",",
")",
"parser",
".",
"add_argument",
"(",
"'-H'",
",",
"'--host'",
",",
"default",
"=",
"'127.0.0.1'",
",",
"help",
"=",
"\"IP address to listen\"",
")",
"parser",
".",
"add_argument",
"(",
"'-w'",
",",
"'--wsgi'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"\"Run as a WSGI process\"",
")",
"parser",
".",
"add_argument",
"(",
"'-b'",
",",
"'--baseurl'",
",",
"default",
"=",
"''",
",",
"help",
"=",
"\"Base URL this is served at (Usually only necessary for WSGI)\"",
",",
")",
"# Parse arguments",
"args",
"=",
"parser",
".",
"parse_args",
"(",
"args",
"=",
"args",
")",
"if",
"args",
".",
"project",
"is",
"None",
":",
"args",
".",
"project",
"=",
"vcs",
".",
"find_root",
"(",
"cwd",
")",
"# Configure logging",
"logging",
".",
"basicConfig",
"(",
"format",
"=",
"settings",
".",
"VERBOSE_LOGGING_FORMAT",
",",
"level",
"=",
"settings",
".",
"VERBOSE_LOGGING_LEVEL",
")",
"# Run the program",
"run",
"(",
"args",
",",
"os",
".",
"getcwd",
"(",
")",
",",
"parser",
".",
"error",
")"
] | https://github.com/doorstop-dev/doorstop/blob/03aa287e5069e29da6979274e1cb6714ee450d3a/doorstop/server/main.py#L28-L83 |
||
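Editor's note: the `main` function above uses argparse's `parents` mechanism so the shared `--debug`/`--launch` flags are declared once and inherited by the main parser. A stripped-down, runnable sketch of that pattern:

import argparse

# Shared options, declared once with add_help=False so the child
# parser's own -h/--help does not clash with the parent's.
shared = argparse.ArgumentParser(add_help=False)
shared.add_argument('--debug', action='store_true')

parser = argparse.ArgumentParser(prog='demo', parents=[shared])
parser.add_argument('-P', '--port', type=int, default=8080)

args = parser.parse_args(['--debug', '-P', '7867'])
assert args.debug and args.port == 7867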
tracim/tracim | a0e9746fde5a4c45b4e0f0bfa2caf9522b8c4e21 | backend/tracim_backend/lib/rq/__init__.py | python | get_rq_queue | (
redis_connection: redis.Redis,
queue_name: RqQueueName = RqQueueName.DEFAULT,
is_async: bool = True,
) | return rq.Queue(name=queue_name.value, connection=redis_connection, is_async=is_async) | :param queue_name: name of queue
:return: wanted queue | :param queue_name: name of queue
:return: wanted queue | [
":",
"param",
"queue_name",
":",
"name",
"of",
"queue",
":",
"return",
":",
"wanted",
"queue"
] | def get_rq_queue(
redis_connection: redis.Redis,
queue_name: RqQueueName = RqQueueName.DEFAULT,
is_async: bool = True,
) -> rq.Queue:
"""
:param queue_name: name of queue
:return: wanted queue
"""
return rq.Queue(name=queue_name.value, connection=redis_connection, is_async=is_async) | [
"def",
"get_rq_queue",
"(",
"redis_connection",
":",
"redis",
".",
"Redis",
",",
"queue_name",
":",
"RqQueueName",
"=",
"RqQueueName",
".",
"DEFAULT",
",",
"is_async",
":",
"bool",
"=",
"True",
",",
")",
"->",
"rq",
".",
"Queue",
":",
"return",
"rq",
".",
"Queue",
"(",
"name",
"=",
"queue_name",
".",
"value",
",",
"connection",
"=",
"redis_connection",
",",
"is_async",
"=",
"is_async",
")"
] | https://github.com/tracim/tracim/blob/a0e9746fde5a4c45b4e0f0bfa2caf9522b8c4e21/backend/tracim_backend/lib/rq/__init__.py#L29-L39 |
|
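Editor's note: with the wrapper above, enqueueing work is one call away. A sketch assuming the `redis` and `rq` packages are installed and a Redis server is reachable on localhost (the queue name and the job are illustrative):

import redis
import rq

connection = redis.Redis()  # assumes localhost:6379
queue = rq.Queue(name='default', connection=connection, is_async=True)

# The job runs later in an `rq worker` process; enqueue returns immediately.
job = queue.enqueue(print, 'hello from a worker')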
jesolem/PCV | 376d5975a033a7bde0102fa4b8fc29750d7fc98b | pcv_book/harris.py | python | match | (desc1,desc2,threshold=0.5) | return matchscores | For each corner point descriptor in the first image,
select its match to the second image using
normalized cross correlation. | For each corner point descriptor in the first image,
select its match to the second image using
normalized cross correlation. | [
"For",
"each",
"corner",
"point",
"descriptor",
"in",
"the",
"first",
"image",
"select",
"its",
"match",
"to",
"second",
"image",
"using",
"normalized",
"cross",
"correlation",
"."
] | def match(desc1,desc2,threshold=0.5):
""" For each corner point descriptor in the first image,
select its match to the second image using
normalized cross correlation. """
n = len(desc1[0])
# pair-wise distances
d = -ones((len(desc1),len(desc2)))
for i in range(len(desc1)):
for j in range(len(desc2)):
d1 = (desc1[i] - mean(desc1[i])) / std(desc1[i])
d2 = (desc2[j] - mean(desc2[j])) / std(desc2[j])
ncc_value = sum(d1 * d2) / (n-1)
if ncc_value > threshold:
d[i,j] = ncc_value
ndx = argsort(-d)
matchscores = ndx[:,0]
return matchscores | [
"def",
"match",
"(",
"desc1",
",",
"desc2",
",",
"threshold",
"=",
"0.5",
")",
":",
"n",
"=",
"len",
"(",
"desc1",
"[",
"0",
"]",
")",
"# pair-wise distances",
"d",
"=",
"-",
"ones",
"(",
"(",
"len",
"(",
"desc1",
")",
",",
"len",
"(",
"desc2",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"desc1",
")",
")",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"desc2",
")",
")",
":",
"d1",
"=",
"(",
"desc1",
"[",
"i",
"]",
"-",
"mean",
"(",
"desc1",
"[",
"i",
"]",
")",
")",
"/",
"std",
"(",
"desc1",
"[",
"i",
"]",
")",
"d2",
"=",
"(",
"desc2",
"[",
"j",
"]",
"-",
"mean",
"(",
"desc2",
"[",
"j",
"]",
")",
")",
"/",
"std",
"(",
"desc2",
"[",
"j",
"]",
")",
"ncc_value",
"=",
"sum",
"(",
"d1",
"*",
"d2",
")",
"/",
"(",
"n",
"-",
"1",
")",
"if",
"ncc_value",
">",
"threshold",
":",
"d",
"[",
"i",
",",
"j",
"]",
"=",
"ncc_value",
"ndx",
"=",
"argsort",
"(",
"-",
"d",
")",
"matchscores",
"=",
"ndx",
"[",
":",
",",
"0",
"]",
"return",
"matchscores"
] | https://github.com/jesolem/PCV/blob/376d5975a033a7bde0102fa4b8fc29750d7fc98b/pcv_book/harris.py#L87-L107 |
|
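Editor's note: `match` above depends on star-imported NumPy names (`ones`, `mean`, `std`, `argsort`). An equivalent, self-contained sketch with explicit imports, matching a descriptor set against itself as a sanity check:

import numpy as np

def ncc_match(desc1, desc2, threshold=0.5):
    # Normalized cross-correlation between every pair of descriptors.
    n = desc1.shape[1]
    d = -np.ones((len(desc1), len(desc2)))
    for i in range(len(desc1)):
        for j in range(len(desc2)):
            d1 = (desc1[i] - desc1[i].mean()) / desc1[i].std()
            d2 = (desc2[j] - desc2[j].mean()) / desc2[j].std()
            ncc = (d1 * d2).sum() / (n - 1)
            if ncc > threshold:
                d[i, j] = ncc
    return np.argsort(-d)[:, 0]   # index of the best match per descriptor

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 25))
print(ncc_match(a, a))   # each descriptor matches itself: [0 1 2 3]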
pengchenglin/ATX-Test | fb3354b210934726af6a369746d6bdf6359f268d | Public/maxim_monkey.py | python | Maxim.run_monkey | (cls, monkey_shell, actions=False, widget_black=False) | | | Clean up the old config files and run monkey; after the run time elapses, pull the log file to the computer
:param monkey_shell: shell command; when using uiautomatortroy the max.xpath.selector file must be configured correctly
:param actions: special event sequences; the max.xpath.actions file must be configured correctly
:param widget_black: blacklisted widgets and masked screen areas; the max.widget.black file must be configured correctly
:return: | Clean up the old config files and run monkey; after the run time elapses, pull the log file to the computer
:param monkey_shell: shell command; when using uiautomatortroy the max.xpath.selector file must be configured correctly
:param actions: special event sequences; the max.xpath.actions file must be configured correctly
:param widget_black: blacklisted widgets and masked screen areas; the max.widget.black file must be configured correctly
:return: | [
"清理旧的配置文件并运行monkey,等待运行时间后pull",
"log文件到电脑",
":",
"param",
"monkey_shell",
":",
"shell命令",
"uiautomatortroy",
"时",
"max",
".",
"xpath",
".",
"selector文件需要配置正确",
":",
"param",
"actions",
":",
"特殊事件序列",
"max",
".",
"xpath",
".",
"actions文件需要配置正确",
":",
"param",
"widget_black",
":",
"黑控件",
"黑区域屏蔽",
"max",
".",
"widget",
".",
"black文件需要配置正确",
":",
"return",
":"
] | def run_monkey(cls, monkey_shell, actions=False, widget_black=False):
'''
Clean up the old config files and run monkey; after the run time elapses, pull the log file to the computer
:param monkey_shell: shell command; when using uiautomatortroy the max.xpath.selector file must be configured correctly
:param actions: special event sequences; the max.xpath.actions file must be configured correctly
:param widget_black: blacklisted widgets and masked screen areas; the max.widget.black file must be configured correctly
:return:
'''
log.i('MONKEY_SHELL:%s' % monkey_shell)
cls.clear_env()
cls.push_jar()
if monkey_shell.find('awl.strings') != -1:
cls.push_white_list()
if monkey_shell.find('uiautomatortroy') != -1:
cls.push_selector()
if actions:
cls.push_actions()
if widget_black:
cls.push_widget_black()
cls.set_AdbIME()
runtime = monkey_shell.split('running-minutes ')[1].split(' ')[0]
log.i('starting run monkey')
log.i('It will be take about %s minutes,please be patient ...........................' % runtime)
# restore uiautomator server
cls.d.service('uiautomator').stop()
time.sleep(2)
cls.d.shell(monkey_shell)
time.sleep(int(runtime) * 60 + 30)
log.i('Maxim monkey run end>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
# restore uiautomator server
cls.d.service('uiautomator').start() | [
"def",
"run_monkey",
"(",
"cls",
",",
"monkey_shell",
",",
"actions",
"=",
"False",
",",
"widget_black",
"=",
"False",
")",
":",
"log",
".",
"i",
"(",
"'MONKEY_SHELL:%s'",
"%",
"monkey_shell",
")",
"cls",
".",
"clear_env",
"(",
")",
"cls",
".",
"push_jar",
"(",
")",
"if",
"monkey_shell",
".",
"find",
"(",
"'awl.strings'",
")",
"!=",
"-",
"1",
":",
"cls",
".",
"push_white_list",
"(",
")",
"if",
"monkey_shell",
".",
"find",
"(",
"'uiautomatortroy'",
")",
"!=",
"-",
"1",
":",
"cls",
".",
"push_selector",
"(",
")",
"if",
"actions",
":",
"cls",
".",
"push_actions",
"(",
")",
"if",
"widget_black",
":",
"cls",
".",
"push_widget_black",
"(",
")",
"cls",
".",
"set_AdbIME",
"(",
")",
"runtime",
"=",
"monkey_shell",
".",
"split",
"(",
"'running-minutes '",
")",
"[",
"1",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"0",
"]",
"log",
".",
"i",
"(",
"'starting run monkey'",
")",
"log",
".",
"i",
"(",
"'It will be take about %s minutes,please be patient ...........................'",
"%",
"runtime",
")",
"# restore uiautomator server",
"cls",
".",
"d",
".",
"service",
"(",
"'uiautomator'",
")",
".",
"stop",
"(",
")",
"time",
".",
"sleep",
"(",
"2",
")",
"cls",
".",
"d",
".",
"shell",
"(",
"monkey_shell",
")",
"time",
".",
"sleep",
"(",
"int",
"(",
"runtime",
")",
"*",
"60",
"+",
"30",
")",
"log",
".",
"i",
"(",
"'Maxim monkey run end>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'",
")",
"# restore uiautomator server",
"cls",
".",
"d",
".",
"service",
"(",
"'uiautomator'",
")",
".",
"start",
"(",
")"
] | https://github.com/pengchenglin/ATX-Test/blob/fb3354b210934726af6a369746d6bdf6359f268d/Public/maxim_monkey.py#L88-L118 |
||
Blizzard/heroprotocol | 3d36eaf44fc4c8ff3331c2ae2f1dc08a94535f1c | heroprotocol/versions/protocol39271.py | python | decode_replay_header | (contents) | | return decoder.instance(replay_header_typeid) | Decodes and returns the replay header from the contents byte string. | Decodes and returns the replay header from the contents byte string. | [
"Decodes",
"and",
"return",
"the",
"replay",
"header",
"from",
"the",
"contents",
"byte",
"string",
"."
] | def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid) | [
"def",
"decode_replay_header",
"(",
"contents",
")",
":",
"decoder",
"=",
"VersionedDecoder",
"(",
"contents",
",",
"typeinfos",
")",
"return",
"decoder",
".",
"instance",
"(",
"replay_header_typeid",
")"
] | https://github.com/Blizzard/heroprotocol/blob/3d36eaf44fc4c8ff3331c2ae2f1dc08a94535f1c/heroprotocol/versions/protocol39271.py#L446-L449 |
|
WerWolv/EdiZon_CheatsConfigsAndScripts | d16d36c7509c01dca770f402babd83ff2e9ae6e7 | Scripts/lib/python3.5/email/parser.py | python | BytesParser.__init__ | (self, *args, **kw) | Parser of binary RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The input must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the input or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message. | Parser of binary RFC 2822 and MIME email messages. | [
"Parser",
"of",
"binary",
"RFC",
"2822",
"and",
"MIME",
"email",
"messages",
"."
] | def __init__(self, *args, **kw):
"""Parser of binary RFC 2822 and MIME email messages.
Creates an in-memory object tree representing the email message, which
can then be manipulated and turned over to a Generator to return the
textual representation of the message.
The input must be formatted as a block of RFC 2822 headers and header
continuation lines, optionally preceded by a `Unix-from' header. The
header block is terminated either by the end of the input or by a
blank line.
_class is the class to instantiate for new message objects when they
must be created. This class must have a constructor that can take
zero arguments. Default is Message.Message.
"""
self.parser = Parser(*args, **kw) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"self",
".",
"parser",
"=",
"Parser",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")"
] | https://github.com/WerWolv/EdiZon_CheatsConfigsAndScripts/blob/d16d36c7509c01dca770f402babd83ff2e9ae6e7/Scripts/lib/python3.5/email/parser.py#L82-L98 |
||
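Editor's note: the class above is the standard library's `email.parser.BytesParser`; the smallest possible use of it parses a header block plus body from bytes:

from email.parser import BytesParser

msg = BytesParser().parsebytes(b"Subject: hello\r\n\r\nbody text\r\n")
assert msg['Subject'] == 'hello'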
suurjaak/Skyperious | 6a4f264dbac8d326c2fa8aeb5483dbca987860bf | skyperious/lib/util.py | python | start_file | (filepath) | return success, error | Tries to open the specified file in the operating system.
@return (success, error message) | Tries to open the specified file in the operating system. | [
"Tries",
"to",
"open",
"the",
"specified",
"file",
"in",
"the",
"operating",
"system",
"."
] | def start_file(filepath):
"""
Tries to open the specified file in the operating system.
@return (success, error message)
"""
success, error = True, ""
try:
if "nt" == os.name:
try: os.startfile(filepath)
except WindowsError as e:
if 1155 == e.winerror: # ERROR_NO_ASSOCIATION
cmd = "Rundll32.exe SHELL32.dll, OpenAs_RunDLL %s"
os.popen(cmd % filepath)
else: raise
elif "mac" == os.name:
subprocess.call(("open", filepath))
elif "posix" == os.name:
subprocess.call(("xdg-open", filepath))
except Exception as e:
success, error = False, repr(e)
return success, error | [
"def",
"start_file",
"(",
"filepath",
")",
":",
"success",
",",
"error",
"=",
"True",
",",
"\"\"",
"try",
":",
"if",
"\"nt\"",
"==",
"os",
".",
"name",
":",
"try",
":",
"os",
".",
"startfile",
"(",
"filepath",
")",
"except",
"WindowsError",
"as",
"e",
":",
"if",
"1155",
"==",
"e",
".",
"winerror",
":",
"# ERROR_NO_ASSOCIATION",
"cmd",
"=",
"\"Rundll32.exe SHELL32.dll, OpenAs_RunDLL %s\"",
"os",
".",
"popen",
"(",
"cmd",
"%",
"filepath",
")",
"else",
":",
"raise",
"elif",
"\"mac\"",
"==",
"os",
".",
"name",
":",
"subprocess",
".",
"call",
"(",
"(",
"\"open\"",
",",
"filepath",
")",
")",
"elif",
"\"posix\"",
"==",
"os",
".",
"name",
":",
"subprocess",
".",
"call",
"(",
"(",
"\"xdg-open\"",
",",
"filepath",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"success",
",",
"error",
"=",
"False",
",",
"repr",
"(",
"e",
")",
"return",
"success",
",",
"error"
] | https://github.com/suurjaak/Skyperious/blob/6a4f264dbac8d326c2fa8aeb5483dbca987860bf/skyperious/lib/util.py#L303-L324 |
|
qutip/qutip | 52d01da181a21b810c3407812c670f35fdc647e8 | qutip/eseries.py | python | eseries.tidyup | (self, *args) | return self | Returns a tidier version of exponential series. | Returns a tidier version of exponential series. | [
"Returns",
"a",
"tidier",
"version",
"of",
"exponential",
"series",
"."
] | def tidyup(self, *args):
""" Returns a tidier version of exponential series.
"""
#
# combine duplicate entries (same rate)
#
rate_tol = 1e-10
ampl_tol = 1e-10
ampl_dict = {}
unique_rates = {}
ur_len = 0
for r_idx in range(len(self.rates)):
# look for a matching rate in the list of unique rates
idx = -1
for ur_key in unique_rates.keys():
if abs(self.rates[r_idx] - unique_rates[ur_key]) < rate_tol:
idx = ur_key
break
if idx == -1:
# no matching rate, add it
unique_rates[ur_len] = self.rates[r_idx]
ampl_dict[ur_len] = [self.ampl[r_idx]]
ur_len = len(unique_rates)
else:
# found matching rate, append amplitude to its list
ampl_dict[idx].append(self.ampl[r_idx])
# create new amplitude and rate list with only unique rates, and
# nonzero amplitudes
rates, ampl = [], []
for ur_key in unique_rates.keys():
total_ampl = sum(ampl_dict[ur_key])
if (isinstance(total_ampl, float) or
isinstance(total_ampl, complex)):
if abs(total_ampl) > ampl_tol:
rates.append(unique_rates[ur_key])
ampl.append(total_ampl)
else:
if abs(total_ampl.full()).max() > ampl_tol:
rates.append(unique_rates[ur_key])
ampl.append(total_ampl)
self.rates = np.array(rates)
self.ampl = np.empty((len(ampl),), dtype=object)
self.ampl[:] = ampl
return self | [
"def",
"tidyup",
"(",
"self",
",",
"*",
"args",
")",
":",
"#",
"# combine duplicate entries (same rate)",
"#",
"rate_tol",
"=",
"1e-10",
"ampl_tol",
"=",
"1e-10",
"ampl_dict",
"=",
"{",
"}",
"unique_rates",
"=",
"{",
"}",
"ur_len",
"=",
"0",
"for",
"r_idx",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"rates",
")",
")",
":",
"# look for a matching rate in the list of unique rates",
"idx",
"=",
"-",
"1",
"for",
"ur_key",
"in",
"unique_rates",
".",
"keys",
"(",
")",
":",
"if",
"abs",
"(",
"self",
".",
"rates",
"[",
"r_idx",
"]",
"-",
"unique_rates",
"[",
"ur_key",
"]",
")",
"<",
"rate_tol",
":",
"idx",
"=",
"ur_key",
"break",
"if",
"idx",
"==",
"-",
"1",
":",
"# no matching rate, add it",
"unique_rates",
"[",
"ur_len",
"]",
"=",
"self",
".",
"rates",
"[",
"r_idx",
"]",
"ampl_dict",
"[",
"ur_len",
"]",
"=",
"[",
"self",
".",
"ampl",
"[",
"r_idx",
"]",
"]",
"ur_len",
"=",
"len",
"(",
"unique_rates",
")",
"else",
":",
"# found matching rate, append amplitude to its list",
"ampl_dict",
"[",
"idx",
"]",
".",
"append",
"(",
"self",
".",
"ampl",
"[",
"r_idx",
"]",
")",
"# create new amplitude and rate list with only unique rates, and",
"# nonzero amplitudes",
"rates",
",",
"ampl",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"ur_key",
"in",
"unique_rates",
".",
"keys",
"(",
")",
":",
"total_ampl",
"=",
"sum",
"(",
"ampl_dict",
"[",
"ur_key",
"]",
")",
"if",
"(",
"isinstance",
"(",
"total_ampl",
",",
"float",
")",
"or",
"isinstance",
"(",
"total_ampl",
",",
"complex",
")",
")",
":",
"if",
"abs",
"(",
"total_ampl",
")",
">",
"ampl_tol",
":",
"rates",
".",
"append",
"(",
"unique_rates",
"[",
"ur_key",
"]",
")",
"ampl",
".",
"append",
"(",
"total_ampl",
")",
"else",
":",
"if",
"abs",
"(",
"total_ampl",
".",
"full",
"(",
")",
")",
".",
"max",
"(",
")",
">",
"ampl_tol",
":",
"rates",
".",
"append",
"(",
"unique_rates",
"[",
"ur_key",
"]",
")",
"ampl",
".",
"append",
"(",
"total_ampl",
")",
"self",
".",
"rates",
"=",
"np",
".",
"array",
"(",
"rates",
")",
"self",
".",
"ampl",
"=",
"np",
".",
"empty",
"(",
"(",
"len",
"(",
"ampl",
")",
",",
")",
",",
"dtype",
"=",
"object",
")",
"self",
".",
"ampl",
"[",
":",
"]",
"=",
"ampl",
"return",
"self"
] | https://github.com/qutip/qutip/blob/52d01da181a21b810c3407812c670f35fdc647e8/qutip/eseries.py#L277-L323 |
|
phantomcyber/playbooks | 9e850ecc44cb98c5dde53784744213a1ed5799bd | malware_hunt_and_contain.py | python | join_filter_2 | (action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None) | return | [] | def join_filter_2(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None):
phantom.debug('join_filter_2() called')
# check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed
if phantom.completed(action_names=['logoff_user_1', 'shutdown_system_1', 'disable_user_1', 'block_hash_3']):
# call connected block "filter_2"
filter_2(container=container, handle=handle)
return | [
"def",
"join_filter_2",
"(",
"action",
"=",
"None",
",",
"success",
"=",
"None",
",",
"container",
"=",
"None",
",",
"results",
"=",
"None",
",",
"handle",
"=",
"None",
",",
"filtered_artifacts",
"=",
"None",
",",
"filtered_results",
"=",
"None",
",",
"custom_function",
"=",
"None",
")",
":",
"phantom",
".",
"debug",
"(",
"'join_filter_2() called'",
")",
"# check if all connected incoming playbooks, actions, or custom functions are done i.e. have succeeded or failed",
"if",
"phantom",
".",
"completed",
"(",
"action_names",
"=",
"[",
"'logoff_user_1'",
",",
"'shutdown_system_1'",
",",
"'disable_user_1'",
",",
"'block_hash_3'",
"]",
")",
":",
"# call connected block \"filter_2\"",
"filter_2",
"(",
"container",
"=",
"container",
",",
"handle",
"=",
"handle",
")",
"return"
] | https://github.com/phantomcyber/playbooks/blob/9e850ecc44cb98c5dde53784744213a1ed5799bd/malware_hunt_and_contain.py#L132-L141 |
|||
MachineLP/OCR_Keras | 9b9a1826b18697acdce2a37c48a81542b4922b64 | ocr_lib/crnn.py | python | CRNN.model_cnn | (self, width) | return base_model, conv_shape | [] | def model_cnn(self, width):
self.input_tensor = Input((self._height, width, 3))
x = self.input_tensor
for i, n_cnn in enumerate([3, 4, 6]):
for j in range(n_cnn):
x = Conv2D(32*2**i, (3, 3), padding='same', kernel_initializer='he_uniform',
kernel_regularizer=l2(self._l2_rate))(x)
x = BatchNormalization(gamma_regularizer=l2(self._l2_rate), beta_regularizer=l2(self._l2_rate))(x)
x = Activation('relu')(x)
x = MaxPooling2D((2, 2))(x)
cnn_model = Model(self.input_tensor, x, name='cnn')
x = cnn_model(self.input_tensor)
conv_shape = x.get_shape().as_list()
#rnn_length = conv_shape[1]
#rnn_dimen = conv_shape[3]*conv_shape[2]
#print (conv_shape, rnn_length, rnn_dimen)
# x = Permute((2, 1, 3), name='permute')(x)
x = Permute((2,1,3),name='permute')(x)
x = TimeDistributed(Flatten(), name='flatten')(x)
# x = Dropout(0.2)(x)
x = Dense(self._n_class, activation='softmax', kernel_regularizer=l2(self._l2_rate), bias_regularizer=l2(self._l2_rate), name='output_cnn')(x)
rnn_out = x
base_model = Model(self.input_tensor, x)
return base_model, conv_shape | [
"def",
"model_cnn",
"(",
"self",
",",
"width",
")",
":",
"self",
".",
"input_tensor",
"=",
"Input",
"(",
"(",
"self",
".",
"_height",
",",
"width",
",",
"3",
")",
")",
"x",
"=",
"self",
".",
"input_tensor",
"for",
"i",
",",
"n_cnn",
"in",
"enumerate",
"(",
"[",
"3",
",",
"4",
",",
"6",
"]",
")",
":",
"for",
"j",
"in",
"range",
"(",
"n_cnn",
")",
":",
"x",
"=",
"Conv2D",
"(",
"32",
"*",
"2",
"**",
"i",
",",
"(",
"3",
",",
"3",
")",
",",
"padding",
"=",
"'same'",
",",
"kernel_initializer",
"=",
"'he_uniform'",
",",
"kernel_regularizer",
"=",
"l2",
"(",
"self",
".",
"_l2_rate",
")",
")",
"(",
"x",
")",
"x",
"=",
"BatchNormalization",
"(",
"gamma_regularizer",
"=",
"l2",
"(",
"self",
".",
"_l2_rate",
")",
",",
"beta_regularizer",
"=",
"l2",
"(",
"self",
".",
"_l2_rate",
")",
")",
"(",
"x",
")",
"x",
"=",
"Activation",
"(",
"'relu'",
")",
"(",
"x",
")",
"x",
"=",
"MaxPooling2D",
"(",
"(",
"2",
",",
"2",
")",
")",
"(",
"x",
")",
"cnn_model",
"=",
"Model",
"(",
"self",
".",
"input_tensor",
",",
"x",
",",
"name",
"=",
"'cnn'",
")",
"x",
"=",
"cnn_model",
"(",
"self",
".",
"input_tensor",
")",
"conv_shape",
"=",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"#rnn_length = conv_shape[1]",
"#rnn_dimen = conv_shape[3]*conv_shape[2]",
"#print (conv_shape, rnn_length, rnn_dimen)",
"# x = Permute((2, 1, 3), name='permute')(x)",
"x",
"=",
"Permute",
"(",
"(",
"2",
",",
"1",
",",
"3",
")",
",",
"name",
"=",
"'permute'",
")",
"(",
"x",
")",
"x",
"=",
"TimeDistributed",
"(",
"Flatten",
"(",
")",
",",
"name",
"=",
"'flatten'",
")",
"(",
"x",
")",
"# x = Dropout(0.2)(x)",
"x",
"=",
"Dense",
"(",
"self",
".",
"_n_class",
",",
"activation",
"=",
"'softmax'",
",",
"kernel_regularizer",
"=",
"l2",
"(",
"self",
".",
"_l2_rate",
")",
",",
"bias_regularizer",
"=",
"l2",
"(",
"self",
".",
"_l2_rate",
")",
",",
"name",
"=",
"'output_cnn'",
")",
"(",
"x",
")",
"rnn_out",
"=",
"x",
"base_model",
"=",
"Model",
"(",
"self",
".",
"input_tensor",
",",
"x",
")",
"return",
"base_model",
",",
"conv_shape"
] | https://github.com/MachineLP/OCR_Keras/blob/9b9a1826b18697acdce2a37c48a81542b4922b64/ocr_lib/crnn.py#L172-L202 |
|||
tomplus/kubernetes_asyncio | f028cc793e3a2c519be6a52a49fb77ff0b014c9b | kubernetes_asyncio/client/models/v1_storage_class_list.py | python | V1StorageClassList.__repr__ | (self) | return self.to_str() | For `print` and `pprint` | For `print` and `pprint` | [
"For",
"print",
"and",
"pprint"
] | def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str() | [
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"self",
".",
"to_str",
"(",
")"
] | https://github.com/tomplus/kubernetes_asyncio/blob/f028cc793e3a2c519be6a52a49fb77ff0b014c9b/kubernetes_asyncio/client/models/v1_storage_class_list.py#L189-L191 |
|
golismero/golismero | 7d605b937e241f51c1ca4f47b20f755eeefb9d76 | tools/sqlmap/lib/core/common.py | python | parseFilePaths | (page) | Detects (possible) absolute system paths inside the provided page content | Detects (possible) absolute system paths inside the provided page content | [
"Detects",
"(",
"possible",
")",
"absolute",
"system",
"paths",
"inside",
"the",
"provided",
"page",
"content"
] | def parseFilePaths(page):
"""
Detects (possible) absolute system paths inside the provided page content
"""
if page:
for regex in (r" in <b>(?P<result>.*?)</b> on line", r"(?:>|\s)(?P<result>[A-Za-z]:[\\/][\w.\\/]*)", r"(?:>|\s)(?P<result>/\w[/\w.]+)"):
for match in re.finditer(regex, page):
absFilePath = match.group("result").strip()
page = page.replace(absFilePath, "")
if isWindowsDriveLetterPath(absFilePath):
absFilePath = posixToNtSlashes(absFilePath)
if absFilePath not in kb.absFilePaths:
kb.absFilePaths.add(absFilePath) | [
"def",
"parseFilePaths",
"(",
"page",
")",
":",
"if",
"page",
":",
"for",
"regex",
"in",
"(",
"r\" in <b>(?P<result>.*?)</b> on line\"",
",",
"r\"(?:>|\\s)(?P<result>[A-Za-z]:[\\\\/][\\w.\\\\/]*)\"",
",",
"r\"(?:>|\\s)(?P<result>/\\w[/\\w.]+)\"",
")",
":",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"regex",
",",
"page",
")",
":",
"absFilePath",
"=",
"match",
".",
"group",
"(",
"\"result\"",
")",
".",
"strip",
"(",
")",
"page",
"=",
"page",
".",
"replace",
"(",
"absFilePath",
",",
"\"\"",
")",
"if",
"isWindowsDriveLetterPath",
"(",
"absFilePath",
")",
":",
"absFilePath",
"=",
"posixToNtSlashes",
"(",
"absFilePath",
")",
"if",
"absFilePath",
"not",
"in",
"kb",
".",
"absFilePaths",
":",
"kb",
".",
"absFilePaths",
".",
"add",
"(",
"absFilePath",
")"
] | https://github.com/golismero/golismero/blob/7d605b937e241f51c1ca4f47b20f755eeefb9d76/tools/sqlmap/lib/core/common.py#L1334-L1349 |
||
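Editor's note: the three regexes in `parseFilePaths` above can be exercised in isolation. A small demo against a synthetic PHP-style error page (the page string and the paths in it are made up for illustration):

import re

page = ("Warning: include() failed in <b>/var/www/app/index.php</b> on line 3 "
        ">C:\\inetpub\\wwwroot\\app.config")

for regex in (r" in <b>(?P<result>.*?)</b> on line",
              r"(?:>|\s)(?P<result>[A-Za-z]:[\\/][\w.\\/]*)",
              r"(?:>|\s)(?P<result>/\w[/\w.]+)"):
    for match in re.finditer(regex, page):
        print(match.group("result").strip())
# prints /var/www/app/index.php (twice, via two patterns)
# and C:\inetpub\wwwroot\app.config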
PyCQA/pylint | 3fc855f9d0fa8e6410be5a23cf954ffd5471b4eb | pylint/extensions/overlapping_exceptions.py | python | OverlappingExceptionsChecker.visit_tryexcept | (self, node: nodes.TryExcept) | | | check for overlapping exception handlers | check for overlapping exception handlers | [
"check",
"for",
"empty",
"except"
] | def visit_tryexcept(self, node: nodes.TryExcept) -> None:
"""check for empty except"""
for handler in node.handlers:
if handler.type is None:
continue
if isinstance(handler.type, astroid.BoolOp):
continue
try:
excs = list(_annotated_unpack_infer(handler.type))
except astroid.InferenceError:
continue
handled_in_clause: List[Tuple[Any, Any]] = []
for part, exc in excs:
if exc is astroid.Uninferable:
continue
if isinstance(exc, astroid.Instance) and utils.inherit_from_std_ex(exc):
exc = exc._proxied
if not isinstance(exc, astroid.ClassDef):
continue
exc_ancestors = [
anc for anc in exc.ancestors() if isinstance(anc, astroid.ClassDef)
]
for prev_part, prev_exc in handled_in_clause:
prev_exc_ancestors = [
anc
for anc in prev_exc.ancestors()
if isinstance(anc, astroid.ClassDef)
]
if exc == prev_exc:
self.add_message(
"overlapping-except",
node=handler.type,
args=f"{prev_part.as_string()} and {part.as_string()} are the same",
)
elif prev_exc in exc_ancestors or exc in prev_exc_ancestors:
ancestor = part if exc in prev_exc_ancestors else prev_part
descendant = part if prev_exc in exc_ancestors else prev_part
self.add_message(
"overlapping-except",
node=handler.type,
args=f"{ancestor.as_string()} is an ancestor class of {descendant.as_string()}",
)
handled_in_clause += [(part, exc)] | [
"def",
"visit_tryexcept",
"(",
"self",
",",
"node",
":",
"nodes",
".",
"TryExcept",
")",
"->",
"None",
":",
"for",
"handler",
"in",
"node",
".",
"handlers",
":",
"if",
"handler",
".",
"type",
"is",
"None",
":",
"continue",
"if",
"isinstance",
"(",
"handler",
".",
"type",
",",
"astroid",
".",
"BoolOp",
")",
":",
"continue",
"try",
":",
"excs",
"=",
"list",
"(",
"_annotated_unpack_infer",
"(",
"handler",
".",
"type",
")",
")",
"except",
"astroid",
".",
"InferenceError",
":",
"continue",
"handled_in_clause",
":",
"List",
"[",
"Tuple",
"[",
"Any",
",",
"Any",
"]",
"]",
"=",
"[",
"]",
"for",
"part",
",",
"exc",
"in",
"excs",
":",
"if",
"exc",
"is",
"astroid",
".",
"Uninferable",
":",
"continue",
"if",
"isinstance",
"(",
"exc",
",",
"astroid",
".",
"Instance",
")",
"and",
"utils",
".",
"inherit_from_std_ex",
"(",
"exc",
")",
":",
"exc",
"=",
"exc",
".",
"_proxied",
"if",
"not",
"isinstance",
"(",
"exc",
",",
"astroid",
".",
"ClassDef",
")",
":",
"continue",
"exc_ancestors",
"=",
"[",
"anc",
"for",
"anc",
"in",
"exc",
".",
"ancestors",
"(",
")",
"if",
"isinstance",
"(",
"anc",
",",
"astroid",
".",
"ClassDef",
")",
"]",
"for",
"prev_part",
",",
"prev_exc",
"in",
"handled_in_clause",
":",
"prev_exc_ancestors",
"=",
"[",
"anc",
"for",
"anc",
"in",
"prev_exc",
".",
"ancestors",
"(",
")",
"if",
"isinstance",
"(",
"anc",
",",
"astroid",
".",
"ClassDef",
")",
"]",
"if",
"exc",
"==",
"prev_exc",
":",
"self",
".",
"add_message",
"(",
"\"overlapping-except\"",
",",
"node",
"=",
"handler",
".",
"type",
",",
"args",
"=",
"f\"{prev_part.as_string()} and {part.as_string()} are the same\"",
",",
")",
"elif",
"prev_exc",
"in",
"exc_ancestors",
"or",
"exc",
"in",
"prev_exc_ancestors",
":",
"ancestor",
"=",
"part",
"if",
"exc",
"in",
"prev_exc_ancestors",
"else",
"prev_part",
"descendant",
"=",
"part",
"if",
"prev_exc",
"in",
"exc_ancestors",
"else",
"prev_part",
"self",
".",
"add_message",
"(",
"\"overlapping-except\"",
",",
"node",
"=",
"handler",
".",
"type",
",",
"args",
"=",
"f\"{ancestor.as_string()} is an ancestor class of {descendant.as_string()}\"",
",",
")",
"handled_in_clause",
"+=",
"[",
"(",
"part",
",",
"exc",
")",
"]"
] | https://github.com/PyCQA/pylint/blob/3fc855f9d0fa8e6410be5a23cf954ffd5471b4eb/pylint/extensions/overlapping_exceptions.py#L39-L85 |
||
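Editor's note: to see the message above fire, enable the extension and lint a handler whose exception tuple contains a class and its ancestor. A hedged, runnable example of code it flags (run pylint with --load-plugins=pylint.extensions.overlapping_exceptions):

def risky():
    raise ValueError("boom")

try:
    risky()
except (ValueError, Exception):   # overlapping-except: Exception is an ancestor of ValueError
    pass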
pyparallel/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | Lib/site-packages/pip-7.1.2-py3.3.egg/pip/_vendor/requests/utils.py | python | from_key_val_list | (value) | return OrderedDict(value) | Take an object and test to see if it can be represented as a
dictionary. If it can be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')]) | Take an object and test to see if it can be represented as a
dictionary. If it can be represented as such, return an
OrderedDict, e.g., | [
"Take",
"an",
"object",
"and",
"test",
"to",
"see",
"if",
"it",
"can",
"be",
"represented",
"as",
"a",
"dictionary",
".",
"Unless",
"it",
"can",
"not",
"be",
"represented",
"as",
"such",
"return",
"an",
"OrderedDict",
"e",
".",
"g",
"."
] | def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value) | [
"def",
"from_key_val_list",
"(",
"value",
")",
":",
"if",
"value",
"is",
"None",
":",
"return",
"None",
"if",
"isinstance",
"(",
"value",
",",
"(",
"str",
",",
"bytes",
",",
"bool",
",",
"int",
")",
")",
":",
"raise",
"ValueError",
"(",
"'cannot encode objects that are not 2-tuples'",
")",
"return",
"OrderedDict",
"(",
"value",
")"
] | https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/pip-7.1.2-py3.3.egg/pip/_vendor/requests/utils.py#L124-L144 |
|
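Editor's note: the doctest in the record above pins down the behavior; here is a standalone replica (same body as the vendored function) for experimenting outside pip's bundled requests:

from collections import OrderedDict

def from_key_val_list(value):
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')
    return OrderedDict(value)

assert from_key_val_list([('key', 'val')]) == OrderedDict([('key', 'val')])
assert from_key_val_list({'key': 'val'}) == OrderedDict([('key', 'val')])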
lisa-lab/pylearn2 | af81e5c362f0df4df85c3e54e23b2adeec026055 | pylearn2/rbm_tools.py | python | AIS.run | (self, n_steps=1) | Performs the grunt-work, implementing
.. math::
log\:w^{(i)} += \mathcal{F}_{k-1}(v_{k-1}) - \mathcal{F}_{k}(v_{k-1})
recursively for all temperatures.
Parameters
----------
n_steps : int, optional
WRITEME | Performs the grunt-work, implementing | [
"Performs",
"the",
"grunt",
"-",
"work",
"implementing"
] | def run(self, n_steps=1):
"""
Performs the grunt-work, implementing
.. math::
log\:w^{(i)} += \mathcal{F}_{k-1}(v_{k-1}) - \mathcal{F}_{k}(v_{k-1})
recursively for all temperatures.
Parameters
----------
n_steps : int, optional
WRITEME
"""
if not hasattr(self, 'betas'):
self.set_betas()
self.std_ais_w = [] # used to log std of log_ais_w regularly
self.logz_beta = [] # used to log log_ais_w at every `key_beta` value
self.var_logz_beta = [] # used to log variance of log_ais_w as above
# initial sample
state = self.v_sample0
ki = 0
# loop over all temperatures from beta=0 to beta=1
for i in range(len(self.betas) - 1):
bp, bp1 = self.betas[i], self.betas[i + 1]
# log-ratio of (free) energies for two nearby temperatures
self.log_ais_w += \
self.free_energy_fn(bp, state) - \
self.free_energy_fn(bp1, state)
# log standard deviation of AIS weights (kind of deprecated)
if (i + 1) % self.log_int == 0:
m = numpy.max(self.log_ais_w)
std_ais = (numpy.log(numpy.std(numpy.exp(self.log_ais_w - m)))
+ m - numpy.log(self.n_runs) / 2)
self.std_ais_w.append(std_ais)
# whenever we reach a "key" beta value, log log_ais_w and
# var(log_ais_w) so we can estimate log_Z_{beta=key_betas[i]} after
# the fact.
if self.key_betas is not None and \
ki < len(self.key_betas) and \
bp1 == self.key_betas[ki]:
log_ais_w_bi, var_log_ais_w_bi = \
self.estimate_from_weights(self.log_ais_w)
self.logz_beta.insert(0, log_ais_w_bi)
self.var_logz_beta.insert(0, var_log_ais_w_bi)
ki += 1
# generate a new sample at temperature beta_{i+1}
state = self.sample_fn(bp1, state) | [
"def",
"run",
"(",
"self",
",",
"n_steps",
"=",
"1",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'betas'",
")",
":",
"self",
".",
"set_betas",
"(",
")",
"self",
".",
"std_ais_w",
"=",
"[",
"]",
"# used to log std of log_ais_w regularly",
"self",
".",
"logz_beta",
"=",
"[",
"]",
"# used to log log_ais_w at every `key_beta` value",
"self",
".",
"var_logz_beta",
"=",
"[",
"]",
"# used to log variance of log_ais_w as above",
"# initial sample",
"state",
"=",
"self",
".",
"v_sample0",
"ki",
"=",
"0",
"# loop over all temperatures from beta=0 to beta=1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"betas",
")",
"-",
"1",
")",
":",
"bp",
",",
"bp1",
"=",
"self",
".",
"betas",
"[",
"i",
"]",
",",
"self",
".",
"betas",
"[",
"i",
"+",
"1",
"]",
"# log-ratio of (free) energies for two nearby temperatures",
"self",
".",
"log_ais_w",
"+=",
"self",
".",
"free_energy_fn",
"(",
"bp",
",",
"state",
")",
"-",
"self",
".",
"free_energy_fn",
"(",
"bp1",
",",
"state",
")",
"# log standard deviation of AIS weights (kind of deprecated)",
"if",
"(",
"i",
"+",
"1",
")",
"%",
"self",
".",
"log_int",
"==",
"0",
":",
"m",
"=",
"numpy",
".",
"max",
"(",
"self",
".",
"log_ais_w",
")",
"std_ais",
"=",
"(",
"numpy",
".",
"log",
"(",
"numpy",
".",
"std",
"(",
"numpy",
".",
"exp",
"(",
"self",
".",
"log_ais_w",
"-",
"m",
")",
")",
")",
"+",
"m",
"-",
"numpy",
".",
"log",
"(",
"self",
".",
"n_runs",
")",
"/",
"2",
")",
"self",
".",
"std_ais_w",
".",
"append",
"(",
"std_ais",
")",
"# whenever we reach a \"key\" beta value, log log_ais_w and",
"# var(log_ais_w) so we can estimate log_Z_{beta=key_betas[i]} after",
"# the fact.",
"if",
"self",
".",
"key_betas",
"is",
"not",
"None",
"and",
"ki",
"<",
"len",
"(",
"self",
".",
"key_betas",
")",
"and",
"bp1",
"==",
"self",
".",
"key_betas",
"[",
"ki",
"]",
":",
"log_ais_w_bi",
",",
"var_log_ais_w_bi",
"=",
"self",
".",
"estimate_from_weights",
"(",
"self",
".",
"log_ais_w",
")",
"self",
".",
"logz_beta",
".",
"insert",
"(",
"0",
",",
"log_ais_w_bi",
")",
"self",
".",
"var_logz_beta",
".",
"insert",
"(",
"0",
",",
"var_log_ais_w_bi",
")",
"ki",
"+=",
"1",
"# generate a new sample at temperature beta_{i+1}",
"state",
"=",
"self",
".",
"sample_fn",
"(",
"bp1",
",",
"state",
")"
] | https://github.com/lisa-lab/pylearn2/blob/af81e5c362f0df4df85c3e54e23b2adeec026055/pylearn2/rbm_tools.py#L459-L513 |
||
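Editor's note: the max-shift in the logging branch of `AIS.run` above is the standard trick for taking statistics of importance weights in log space without overflowing exp(). Just that computation, isolated, with synthetic weights:

import numpy as np

log_ais_w = np.array([700.0, 701.0, 699.5])   # exp() of these would overflow
n_runs = len(log_ais_w)

m = log_ais_w.max()
# log of std(exp(log_ais_w)) / sqrt(n_runs), computed stably as in the loop above
std_ais = np.log(np.std(np.exp(log_ais_w - m))) + m - np.log(n_runs) / 2
print(std_ais)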
dimagi/commcare-hq | d67ff1d3b4c51fa050c19e60c3253a79d3452a39 | corehq/motech/auth.py | python | OAuth2PasswordGrantManager.__init__ | (
self,
base_url: str,
username: str,
password: str,
client_id: str,
client_secret: str,
token_url: str,
refresh_url: str,
pass_credentials_in_header: bool,
connection_settings: 'ConnectionSettings',
) | [] | def __init__(
self,
base_url: str,
username: str,
password: str,
client_id: str,
client_secret: str,
token_url: str,
refresh_url: str,
pass_credentials_in_header: bool,
connection_settings: 'ConnectionSettings',
):
self.base_url = base_url
self.username = username
self.password = password
self.client_id = client_id
self.client_secret = client_secret
self.token_url = token_url
self.refresh_url = refresh_url
self.pass_credentials_in_header = pass_credentials_in_header
self.connection_settings = connection_settings | [
"def",
"__init__",
"(",
"self",
",",
"base_url",
":",
"str",
",",
"username",
":",
"str",
",",
"password",
":",
"str",
",",
"client_id",
":",
"str",
",",
"client_secret",
":",
"str",
",",
"token_url",
":",
"str",
",",
"refresh_url",
":",
"str",
",",
"pass_credentials_in_header",
":",
"bool",
",",
"connection_settings",
":",
"'ConnectionSettings'",
",",
")",
":",
"self",
".",
"base_url",
"=",
"base_url",
"self",
".",
"username",
"=",
"username",
"self",
".",
"password",
"=",
"password",
"self",
".",
"client_id",
"=",
"client_id",
"self",
".",
"client_secret",
"=",
"client_secret",
"self",
".",
"token_url",
"=",
"token_url",
"self",
".",
"refresh_url",
"=",
"refresh_url",
"self",
".",
"pass_credentials_in_header",
"=",
"pass_credentials_in_header",
"self",
".",
"connection_settings",
"=",
"connection_settings"
] | https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/motech/auth.py#L270-L290 |
||||
bruderstein/PythonScript | df9f7071ddf3a079e3a301b9b53a6dc78cf1208f | PythonLib/full/importlib/_bootstrap.py | python | BuiltinImporter.is_package | (cls, fullname) | return False | Return False as built-in modules are never packages. | Return False as built-in modules are never packages. | [
"Return",
"False",
"as",
"built",
"-",
"in",
"modules",
"are",
"never",
"packages",
"."
] | def is_package(cls, fullname):
"""Return False as built-in modules are never packages."""
return False | [
"def",
"is_package",
"(",
"cls",
",",
"fullname",
")",
":",
"return",
"False"
] | https://github.com/bruderstein/PythonScript/blob/df9f7071ddf3a079e3a301b9b53a6dc78cf1208f/PythonLib/full/importlib/_bootstrap.py#L797-L799 |
|
openedx/edx-platform | 68dd185a0ab45862a2a61e0f803d7e03d2be71b5 | common/djangoapps/split_modulestore_django/migrations/0002_data_migration.py | python | reverse_func | (apps, schema_editor) | | | Reversing the data migration is a no-op, because edX.org used a migration path that started with writing to
both MySQL+MongoDB while still reading from MongoDB, then later executed this data migration, then later cut over to
reading from MySQL only. If we reversed this by deleting all entries, it would undo any writes that took place
before this data migration, which are unrelated. | Reversing the data migration is a no-op, because edX.org used a migration path that started with writing to
both MySQL+MongoDB while still reading from MongoDB, then later executed this data migration, then later cut over to
reading from MySQL only. If we reversed this by deleting all entries, it would undo any writes that took place
before this data migration, which are unrelated. | [
"Reversing",
"the",
"data",
"migration",
"is",
"a",
"no",
"-",
"op",
"because",
"edX",
".",
"org",
"used",
"a",
"migration",
"path",
"path",
"that",
"started",
"with",
"writing",
"to",
"both",
"MySQL",
"+",
"MongoDB",
"while",
"still",
"reading",
"from",
"MongoDB",
"then",
"later",
"executed",
"this",
"data",
"migration",
"then",
"later",
"cut",
"over",
"to",
"reading",
"from",
"MySQL",
"only",
".",
"If",
"we",
"reversed",
"this",
"by",
"deleting",
"all",
"entries",
"it",
"would",
"undo",
"any",
"writes",
"that",
"took",
"place",
"before",
"this",
"data",
"migration",
"which",
"are",
"unrelated",
"."
] | def reverse_func(apps, schema_editor):
"""
Reversing the data migration is a no-op, because edX.org used a migration path that started with writing to
both MySQL+MongoDB while still reading from MongoDB, then later executed this data migration, then later cut over to
reading from MySQL only. If we reversed this by deleting all entries, it would undo any writes that took place
before this data migration, which are unrelated.
"""
pass | [
"def",
"reverse_func",
"(",
"apps",
",",
"schema_editor",
")",
":",
"pass"
] | https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/common/djangoapps/split_modulestore_django/migrations/0002_data_migration.py#L62-L69 |
||
Patrowl/PatrowlEngines | 61dce987b662177396fa2f914ae07fd651179daf | engines/openvas/engine-openvas-omp.py | python | create_task | (target_name, target_id) | | return task_id | This function creates a task in OpenVAS and returns its task_id | This function creates a task in OpenVAS and returns its task_id | [
"This",
"function",
"creates",
"a",
"task_id",
"in",
"OpenVAS",
"and",
"returns",
"its",
"task_id"
] | def create_task(target_name, target_id):
"""
This function creates a task in OpenVAS and returns its task_id
"""
result = omp_cmd(["-C", "-c", engine.scanner["scan_config"], "--name", target_name, "--target", target_id]).split("\n")
task_id = result[0]
if not is_uuid(task_id):
return None
return task_id | [
"def",
"create_task",
"(",
"target_name",
",",
"target_id",
")",
":",
"result",
"=",
"omp_cmd",
"(",
"[",
"\"-C\"",
",",
"\"-c\"",
",",
"engine",
".",
"scanner",
"[",
"\"scan_config\"",
"]",
",",
"\"--name\"",
",",
"target_name",
",",
"\"--target\"",
",",
"target_id",
"]",
")",
".",
"split",
"(",
"\"\\n\"",
")",
"task_id",
"=",
"result",
"[",
"0",
"]",
"if",
"not",
"is_uuid",
"(",
"task_id",
")",
":",
"return",
"None",
"return",
"task_id"
] | https://github.com/Patrowl/PatrowlEngines/blob/61dce987b662177396fa2f914ae07fd651179daf/engines/openvas/engine-openvas-omp.py#L179-L187 |
|
dbt-labs/dbt-core | e943b9fc842535e958ef4fd0b8703adc91556bc6 | core/dbt/parser/manifest.py | python | ManifestLoader.load_and_parse_macros | (self, project_parser_files) | [] | def load_and_parse_macros(self, project_parser_files):
for project in self.all_projects.values():
if project.project_name not in project_parser_files:
continue
parser_files = project_parser_files[project.project_name]
if 'MacroParser' in parser_files:
parser = MacroParser(project, self.manifest)
for file_id in parser_files['MacroParser']:
block = FileBlock(self.manifest.files[file_id])
parser.parse_file(block)
# increment parsed path count for performance tracking
self._perf_info.parsed_path_count += 1
# generic tests historically lived in the macros directory but can now be nested
# in a /generic directory under /tests so we want to process them here as well
if 'GenericTestParser' in parser_files:
parser = GenericTestParser(project, self.manifest)
for file_id in parser_files['GenericTestParser']:
block = FileBlock(self.manifest.files[file_id])
parser.parse_file(block)
# increment parsed path count for performance tracking
self._perf_info.parsed_path_count += 1
self.build_macro_resolver()
# Look at changed macros and update the macro.depends_on.macros
self.macro_depends_on() | [
"def",
"load_and_parse_macros",
"(",
"self",
",",
"project_parser_files",
")",
":",
"for",
"project",
"in",
"self",
".",
"all_projects",
".",
"values",
"(",
")",
":",
"if",
"project",
".",
"project_name",
"not",
"in",
"project_parser_files",
":",
"continue",
"parser_files",
"=",
"project_parser_files",
"[",
"project",
".",
"project_name",
"]",
"if",
"'MacroParser'",
"in",
"parser_files",
":",
"parser",
"=",
"MacroParser",
"(",
"project",
",",
"self",
".",
"manifest",
")",
"for",
"file_id",
"in",
"parser_files",
"[",
"'MacroParser'",
"]",
":",
"block",
"=",
"FileBlock",
"(",
"self",
".",
"manifest",
".",
"files",
"[",
"file_id",
"]",
")",
"parser",
".",
"parse_file",
"(",
"block",
")",
"# increment parsed path count for performance tracking",
"self",
".",
"_perf_info",
".",
"parsed_path_count",
"+=",
"1",
"# generic tests hisotrically lived in the macros directoy but can now be nested",
"# in a /generic directory under /tests so we want to process them here as well",
"if",
"'GenericTestParser'",
"in",
"parser_files",
":",
"parser",
"=",
"GenericTestParser",
"(",
"project",
",",
"self",
".",
"manifest",
")",
"for",
"file_id",
"in",
"parser_files",
"[",
"'GenericTestParser'",
"]",
":",
"block",
"=",
"FileBlock",
"(",
"self",
".",
"manifest",
".",
"files",
"[",
"file_id",
"]",
")",
"parser",
".",
"parse_file",
"(",
"block",
")",
"# increment parsed path count for performance tracking",
"self",
".",
"_perf_info",
".",
"parsed_path_count",
"+=",
"1",
"self",
".",
"build_macro_resolver",
"(",
")",
"# Look at changed macros and update the macro.depends_on.macros",
"self",
".",
"macro_depends_on",
"(",
")"
] | https://github.com/dbt-labs/dbt-core/blob/e943b9fc842535e958ef4fd0b8703adc91556bc6/core/dbt/parser/manifest.py#L393-L417 |
||||
deepgram/kur | fd0c120e50815c1e5be64e5dde964dcd47234556 | kur/model/model.py | python | Model.register_provider | (self, provider) | | | Lets the model know which data provider we are using.
This allows the model to do shape inference for layers. | Lets the model know which data provider we are using. | [
"Let",
"s",
"the",
"model",
"know",
"which",
"data",
"provider",
"we",
"are",
"using",
"."
] | def register_provider(self, provider):
""" Let's the model know which data provider we are using.
This allows the model to do shape inference for layers.
"""
self.provider = provider | [
"def",
"register_provider",
"(",
"self",
",",
"provider",
")",
":",
"self",
".",
"provider",
"=",
"provider"
] | https://github.com/deepgram/kur/blob/fd0c120e50815c1e5be64e5dde964dcd47234556/kur/model/model.py#L102-L107 |
||
larryhastings/gilectomy | 4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a | Lib/idlelib/configHandler.py | python | IdleUserConfParser.RemoveEmptySections | (self) | Remove any sections that have no options. | Remove any sections that have no options. | [
"Remove",
"any",
"sections",
"that",
"have",
"no",
"options",
"."
] | def RemoveEmptySections(self):
"Remove any sections that have no options."
for section in self.sections():
if not self.GetOptionList(section):
self.remove_section(section) | [
"def",
"RemoveEmptySections",
"(",
"self",
")",
":",
"for",
"section",
"in",
"self",
".",
"sections",
"(",
")",
":",
"if",
"not",
"self",
".",
"GetOptionList",
"(",
"section",
")",
":",
"self",
".",
"remove_section",
"(",
"section",
")"
] | https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/idlelib/configHandler.py#L82-L86 |
||
omz/PythonistaAppTemplate | f560f93f8876d82a21d108977f90583df08d55af | PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/reportlab/graphics/charts/barcharts.py | python | BarChart._drawFinish | (self) | return g | finalize the drawing of a barchart | finalize the drawing of a barchart | [
"finalize",
"the",
"drawing",
"of",
"a",
"barchart"
] | def _drawFinish(self):
'''finalize the drawing of a barchart'''
cA = self.categoryAxis
vA = self.valueAxis
cA.configure(self._configureData)
self.calcBarPositions()
g = Group()
zIndex = getattr(self,'zIndexOverrides',None)
if not zIndex:
g.add(self.makeBackground())
cAdgl = getattr(cA,'drawGridLast',False)
vAdgl = getattr(vA,'drawGridLast',False)
if not cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
if not vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
g.add(self.makeBars())
g.add(cA)
g.add(vA)
if cAdgl: cA.makeGrid(g,parent=self, dim=vA.getGridDims)
if vAdgl: vA.makeGrid(g,parent=self, dim=cA.getGridDims)
for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
else:
Z=dict(
background=0,
categoryAxis=1,
valueAxis=2,
bars=3,
barLabels=4,
categoryAxisGrid=5,
valueAxisGrid=6,
annotations=7,
)
for z in zIndex.strip().split(','):
z = z.strip()
if not z: continue
try:
k,v=z.split('=')
except:
raise ValueError('Badly formatted zIndex clause %r in %r\nallowed variables are\n%s' % (z,zIndex,'\n'.join(['%s=%r'% (k,Z[k]) for k in sorted(Z.keys())])))
if k not in Z:
raise ValueError('Unknown zIndex variable %r in %r\nallowed variables are\n%s' % (k,Z,'\n'.join(['%s=%r'% (k,Z[k]) for k in sorted(Z.keys())])))
try:
v = eval(v,{}) #only constants allowed
assert isinstance(v,(float,int))
except:
raise ValueError('Bad zIndex value %r in clause %r of zIndex\nallowed variables are\n%s' % (v,z,zIndex,'\n'.join(['%s=%r'% (k,Z[k]) for k in sorted(Z.keys())])))
Z[k] = v
Z = [(v,k) for k,v in Z.items()]
Z.sort()
b = self.makeBars()
bl = b.contents.pop(-1)
for v,k in Z:
if k=='background':
g.add(self.makeBackground())
elif k=='categoryAxis':
g.add(cA)
elif k=='categoryAxisGrid':
cA.makeGrid(g,parent=self, dim=vA.getGridDims)
elif k=='valueAxis':
g.add(vA)
elif k=='valueAxisGrid':
vA.makeGrid(g,parent=self, dim=cA.getGridDims)
elif k=='bars':
g.add(b)
elif k=='barLabels':
g.add(bl)
elif k=='annotations':
for a in getattr(self,'annotations',()): g.add(a(self,cA.scale,vA.scale))
del self._configureData
return g | [
"def",
"_drawFinish",
"(",
"self",
")",
":",
"cA",
"=",
"self",
".",
"categoryAxis",
"vA",
"=",
"self",
".",
"valueAxis",
"cA",
".",
"configure",
"(",
"self",
".",
"_configureData",
")",
"self",
".",
"calcBarPositions",
"(",
")",
"g",
"=",
"Group",
"(",
")",
"zIndex",
"=",
"getattr",
"(",
"self",
",",
"'zIndexOverrides'",
",",
"None",
")",
"if",
"not",
"zIndex",
":",
"g",
".",
"add",
"(",
"self",
".",
"makeBackground",
"(",
")",
")",
"cAdgl",
"=",
"getattr",
"(",
"cA",
",",
"'drawGridLast'",
",",
"False",
")",
"vAdgl",
"=",
"getattr",
"(",
"vA",
",",
"'drawGridLast'",
",",
"False",
")",
"if",
"not",
"cAdgl",
":",
"cA",
".",
"makeGrid",
"(",
"g",
",",
"parent",
"=",
"self",
",",
"dim",
"=",
"vA",
".",
"getGridDims",
")",
"if",
"not",
"vAdgl",
":",
"vA",
".",
"makeGrid",
"(",
"g",
",",
"parent",
"=",
"self",
",",
"dim",
"=",
"cA",
".",
"getGridDims",
")",
"g",
".",
"add",
"(",
"self",
".",
"makeBars",
"(",
")",
")",
"g",
".",
"add",
"(",
"cA",
")",
"g",
".",
"add",
"(",
"vA",
")",
"if",
"cAdgl",
":",
"cA",
".",
"makeGrid",
"(",
"g",
",",
"parent",
"=",
"self",
",",
"dim",
"=",
"vA",
".",
"getGridDims",
")",
"if",
"vAdgl",
":",
"vA",
".",
"makeGrid",
"(",
"g",
",",
"parent",
"=",
"self",
",",
"dim",
"=",
"cA",
".",
"getGridDims",
")",
"for",
"a",
"in",
"getattr",
"(",
"self",
",",
"'annotations'",
",",
"(",
")",
")",
":",
"g",
".",
"add",
"(",
"a",
"(",
"self",
",",
"cA",
".",
"scale",
",",
"vA",
".",
"scale",
")",
")",
"else",
":",
"Z",
"=",
"dict",
"(",
"background",
"=",
"0",
",",
"categoryAxis",
"=",
"1",
",",
"valueAxis",
"=",
"2",
",",
"bars",
"=",
"3",
",",
"barLabels",
"=",
"4",
",",
"categoryAxisGrid",
"=",
"5",
",",
"valueAxisGrid",
"=",
"6",
",",
"annotations",
"=",
"7",
",",
")",
"for",
"z",
"in",
"zIndex",
".",
"strip",
"(",
")",
".",
"split",
"(",
"','",
")",
":",
"z",
"=",
"z",
".",
"strip",
"(",
")",
"if",
"not",
"z",
":",
"continue",
"try",
":",
"k",
",",
"v",
"=",
"z",
".",
"split",
"(",
"'='",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Badly formatted zIndex clause %r in %r\\nallowed variables are\\n%s'",
"%",
"(",
"z",
",",
"zIndex",
",",
"'\\n'",
".",
"join",
"(",
"[",
"'%s=%r'",
"%",
"(",
"k",
",",
"Z",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"Z",
".",
"keys",
"(",
")",
")",
"]",
")",
")",
")",
"if",
"k",
"not",
"in",
"Z",
":",
"raise",
"ValueError",
"(",
"'Unknown zIndex variable %r in %r\\nallowed variables are\\n%s'",
"%",
"(",
"k",
",",
"Z",
",",
"'\\n'",
".",
"join",
"(",
"[",
"'%s=%r'",
"%",
"(",
"k",
",",
"Z",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"Z",
".",
"keys",
"(",
")",
")",
"]",
")",
")",
")",
"try",
":",
"v",
"=",
"eval",
"(",
"v",
",",
"{",
"}",
")",
"#only constants allowed",
"assert",
"isinstance",
"(",
"v",
",",
"(",
"float",
",",
"int",
")",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'Bad zIndex value %r in clause %r of zIndex\\nallowed variables are\\n%s'",
"%",
"(",
"v",
",",
"z",
",",
"zIndex",
",",
"'\\n'",
".",
"join",
"(",
"[",
"'%s=%r'",
"%",
"(",
"k",
",",
"Z",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"sorted",
"(",
"Z",
".",
"keys",
"(",
")",
")",
"]",
")",
")",
")",
"Z",
"[",
"k",
"]",
"=",
"v",
"Z",
"=",
"[",
"(",
"v",
",",
"k",
")",
"for",
"k",
",",
"v",
"in",
"Z",
".",
"items",
"(",
")",
"]",
"Z",
".",
"sort",
"(",
")",
"b",
"=",
"self",
".",
"makeBars",
"(",
")",
"bl",
"=",
"b",
".",
"contents",
".",
"pop",
"(",
"-",
"1",
")",
"for",
"v",
",",
"k",
"in",
"Z",
":",
"if",
"k",
"==",
"'background'",
":",
"g",
".",
"add",
"(",
"self",
".",
"makeBackground",
"(",
")",
")",
"elif",
"k",
"==",
"'categoryAxis'",
":",
"g",
".",
"add",
"(",
"cA",
")",
"elif",
"k",
"==",
"'categoryAxisGrid'",
":",
"cA",
".",
"makeGrid",
"(",
"g",
",",
"parent",
"=",
"self",
",",
"dim",
"=",
"vA",
".",
"getGridDims",
")",
"elif",
"k",
"==",
"'valueAxis'",
":",
"g",
".",
"add",
"(",
"vA",
")",
"elif",
"k",
"==",
"'valueAxisGrid'",
":",
"vA",
".",
"makeGrid",
"(",
"g",
",",
"parent",
"=",
"self",
",",
"dim",
"=",
"cA",
".",
"getGridDims",
")",
"elif",
"k",
"==",
"'bars'",
":",
"g",
".",
"add",
"(",
"b",
")",
"elif",
"k",
"==",
"'barLabels'",
":",
"g",
".",
"add",
"(",
"bl",
")",
"elif",
"k",
"==",
"'annotations'",
":",
"for",
"a",
"in",
"getattr",
"(",
"self",
",",
"'annotations'",
",",
"(",
")",
")",
":",
"g",
".",
"add",
"(",
"a",
"(",
"self",
",",
"cA",
".",
"scale",
",",
"vA",
".",
"scale",
")",
")",
"del",
"self",
".",
"_configureData",
"return",
"g"
] | https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/reportlab/graphics/charts/barcharts.py#L201-L271 |
|
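The zIndexOverrides branch above parses a comma-separated list of layer=number clauses (only numeric constants survive the restricted eval) and then draws the named layers in ascending order. A small usage sketch against the same barcharts module; the data values are illustrative:

from reportlab.graphics.charts.barcharts import VerticalBarChart

chart = VerticalBarChart()
chart.data = [(10, 20, 30)]
# Push both grids above the default annotations level (7) so they are
# painted last, i.e. on top of the bars.
chart.zIndexOverrides = 'valueAxisGrid=8, categoryAxisGrid=9'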
pandas-dev/pandas | 5ba7d714014ae8feaccc0dd4a98890828cf2832d | pandas/core/indexes/base.py | python | Index._intersection | (self, other: Index, sort=False) | return res_values | intersection specialized to the case with matching dtypes. | intersection specialized to the case with matching dtypes. | [
"intersection",
"specialized",
"to",
"the",
"case",
"with",
"matching",
"dtypes",
"."
] | def _intersection(self, other: Index, sort=False):
"""
intersection specialized to the case with matching dtypes.
"""
if self.is_monotonic and other.is_monotonic and self._can_use_libjoin:
try:
result = self._inner_indexer(other)[0]
except TypeError:
# non-comparable; should only be for object dtype
pass
else:
# TODO: algos.unique1d should preserve DTA/TDA
res = algos.unique1d(result)
return ensure_wrapped_if_datetimelike(res)
res_values = self._intersection_via_get_indexer(other, sort=sort)
res_values = _maybe_try_sort(res_values, sort)
return res_values | [
"def",
"_intersection",
"(",
"self",
",",
"other",
":",
"Index",
",",
"sort",
"=",
"False",
")",
":",
"if",
"self",
".",
"is_monotonic",
"and",
"other",
".",
"is_monotonic",
"and",
"self",
".",
"_can_use_libjoin",
":",
"try",
":",
"result",
"=",
"self",
".",
"_inner_indexer",
"(",
"other",
")",
"[",
"0",
"]",
"except",
"TypeError",
":",
"# non-comparable; should only be for object dtype",
"pass",
"else",
":",
"# TODO: algos.unique1d should preserve DTA/TDA",
"res",
"=",
"algos",
".",
"unique1d",
"(",
"result",
")",
"return",
"ensure_wrapped_if_datetimelike",
"(",
"res",
")",
"res_values",
"=",
"self",
".",
"_intersection_via_get_indexer",
"(",
"other",
",",
"sort",
"=",
"sort",
")",
"res_values",
"=",
"_maybe_try_sort",
"(",
"res_values",
",",
"sort",
")",
"return",
"res_values"
] | https://github.com/pandas-dev/pandas/blob/5ba7d714014ae8feaccc0dd4a98890828cf2832d/pandas/core/indexes/base.py#L3333-L3350 |
|
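_intersection above is the internal worker behind the public Index.intersection; a quick sketch of the observable behavior (the exact Index repr varies across pandas versions):

import pandas as pd

a = pd.Index([1, 2, 3, 4])
b = pd.Index([3, 4, 5, 6])
# Both inputs are monotonic with matching dtypes, so the fast
# _inner_indexer path above is eligible; the result is the overlap.
print(a.intersection(b, sort=False))   # Index of [3, 4]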
DataDog/integrations-core | 934674b29d94b70ccc008f76ea172d0cdae05e1e | oracle/datadog_checks/oracle/config_models/shared.py | python | SharedConfig._ensure_defaults | (cls, v, field) | return getattr(defaults, f'shared_{field.name}')(field, v) | [] | def _ensure_defaults(cls, v, field):
if v is not None or field.required:
return v
return getattr(defaults, f'shared_{field.name}')(field, v) | [
"def",
"_ensure_defaults",
"(",
"cls",
",",
"v",
",",
"field",
")",
":",
"if",
"v",
"is",
"not",
"None",
"or",
"field",
".",
"required",
":",
"return",
"v",
"return",
"getattr",
"(",
"defaults",
",",
"f'shared_{field.name}'",
")",
"(",
"field",
",",
"v",
")"
] | https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/oracle/datadog_checks/oracle/config_models/shared.py#L34-L38 |
|||
securesystemslab/zippy | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | zippy/lib-python/3/poplib.py | python | POP3.getwelcome | (self) | return self.welcome | [] | def getwelcome(self):
return self.welcome | [
"def",
"getwelcome",
"(",
"self",
")",
":",
"return",
"self",
".",
"welcome"
] | https://github.com/securesystemslab/zippy/blob/ff0e84ac99442c2c55fe1d285332cfd4e185e089/zippy/lib-python/3/poplib.py#L167-L168 |
|||
jinfagang/alfred | dd7420d1410f82f9dadf07a30b6fad5a71168001 | alfred/modules/data/voc2yolo.py | python | voc2yolo | (img_dir, xml_dir, class_txt) | [] | def voc2yolo(img_dir, xml_dir, class_txt):
classes_names = None
if class_txt:
classes_names = [i.strip() for i in open(class_txt, 'r').readlines()]
labels_target = os.path.join(os.path.dirname(xml_dir.rstrip('/')), 'yolo_converted_from_voc')
print('labels dir to save: {}'.format(labels_target))
if not os.path.exists(labels_target):
os.makedirs(labels_target)
xmls = glob.glob(os.path.join(xml_dir, '*.xml'))
for xml in xmls:
convert_annotation(xml, labels_target, classes_names)
print('Done!')
print('class name order used is: ', classes_names) | [
"def",
"voc2yolo",
"(",
"img_dir",
",",
"xml_dir",
",",
"class_txt",
")",
":",
"classes_names",
"=",
"None",
"if",
"class_txt",
":",
"classes_names",
"=",
"[",
"i",
".",
"strip",
"(",
")",
"for",
"i",
"in",
"open",
"(",
"class_txt",
",",
"'r'",
")",
".",
"readlines",
"(",
")",
"]",
"labels_target",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"xml_dir",
".",
"rstrip",
"(",
"'/'",
")",
")",
",",
"'yolo_converted_from_voc'",
")",
"print",
"(",
"'labels dir to save: {}'",
".",
"format",
"(",
"labels_target",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"labels_target",
")",
":",
"os",
".",
"makedirs",
"(",
"labels_target",
")",
"xmls",
"=",
"glob",
".",
"glob",
"(",
"os",
".",
"path",
".",
"join",
"(",
"xml_dir",
",",
"'*.xml'",
")",
")",
"for",
"xml",
"in",
"xmls",
":",
"convert_annotation",
"(",
"xml",
",",
"labels_target",
",",
"classes_names",
")",
"print",
"(",
"'Done!'",
")",
"print",
"(",
"'class name order used is: '",
",",
"classes_names",
")"
] | https://github.com/jinfagang/alfred/blob/dd7420d1410f82f9dadf07a30b6fad5a71168001/alfred/modules/data/voc2yolo.py#L50-L64 |
||||
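convert_annotation, called per XML file above, is not included in this record; under the usual VOC-to-YOLO convention it would normalize each box to center/size fractions, sketched here as a hypothetical helper:

def voc_box_to_yolo(size, box):
    # size = (img_w, img_h) in pixels; box = (xmin, xmax, ymin, ymax).
    img_w, img_h = size
    xmin, xmax, ymin, ymax = box
    x = (xmin + xmax) / 2.0 / img_w   # normalized box center x
    y = (ymin + ymax) / 2.0 / img_h   # normalized box center y
    w = (xmax - xmin) / img_w         # normalized box width
    h = (ymax - ymin) / img_h         # normalized box height
    return x, y, w, h

# One YOLO label line: "<class_index> x y w h"
print(0, *voc_box_to_yolo((640, 480), (100, 300, 120, 360)))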
pyparallel/pyparallel | 11e8c6072d48c8f13641925d17b147bf36ee0ba3 | Tools/scripts/highlight.py | python | ansi_highlight | (classified_text, colors=default_ansi) | return ''.join(result) | Add syntax highlighting to source code using ANSI escape sequences | Add syntax highlighting to source code using ANSI escape sequences | [
"Add",
"syntax",
"highlighting",
"to",
"source",
"code",
"using",
"ANSI",
"escape",
"sequences"
] | def ansi_highlight(classified_text, colors=default_ansi):
'Add syntax highlighting to source code using ANSI escape sequences'
# http://en.wikipedia.org/wiki/ANSI_escape_code
result = []
for kind, text in classified_text:
opener, closer = colors.get(kind, ('', ''))
result += [opener, text, closer]
return ''.join(result) | [
"def",
"ansi_highlight",
"(",
"classified_text",
",",
"colors",
"=",
"default_ansi",
")",
":",
"# http://en.wikipedia.org/wiki/ANSI_escape_code",
"result",
"=",
"[",
"]",
"for",
"kind",
",",
"text",
"in",
"classified_text",
":",
"opener",
",",
"closer",
"=",
"colors",
".",
"get",
"(",
"kind",
",",
"(",
"''",
",",
"''",
")",
")",
"result",
"+=",
"[",
"opener",
",",
"text",
",",
"closer",
"]",
"return",
"''",
".",
"join",
"(",
"result",
")"
] | https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Tools/scripts/highlight.py#L87-L94 |
|
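ansi_highlight above falls back to an empty ('', '') pair for unmapped kinds, so unknown tokens pass through unstyled. A sketch with a hand-rolled palette in the same (opener, closer) shape as default_ansi, assuming highlight.py is importable; the kind names are illustrative:

from highlight import ansi_highlight

palette = {
    'keyword': ('\x1b[1;34m', '\x1b[0m'),   # bold blue
    'comment': ('\x1b[32m', '\x1b[0m'),     # green
}
classified = [('keyword', 'def'), (None, ' answer(): return 42  '),
              ('comment', '# unmapped kinds print with no styling')]
print(ansi_highlight(classified, colors=palette))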
saltstack/salt | fae5bc757ad0f1716483ce7ae180b451545c2058 | salt/modules/github.py | python | is_team_member | (name, team_name, profile="github") | return name.lower() in list_team_members(team_name, profile=profile) | Returns True if the github user is in the team with team_name, or False
otherwise.
name
The name of the user whose membership to check.
team_name
The name of the team to check membership in.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.is_team_member 'user_name' 'team_name'
.. versionadded:: 2016.11.0 | Returns True if the github user is in the team with team_name, or False
otherwise. | [
"Returns",
"True",
"if",
"the",
"github",
"user",
"is",
"in",
"the",
"team",
"with",
"team_name",
"or",
"False",
"otherwise",
"."
] | def is_team_member(name, team_name, profile="github"):
"""
Returns True if the github user is in the team with team_name, or False
otherwise.
name
The name of the user whose membership to check.
team_name
The name of the team to check membership in.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.is_team_member 'user_name' 'team_name'
.. versionadded:: 2016.11.0
"""
return name.lower() in list_team_members(team_name, profile=profile) | [
"def",
"is_team_member",
"(",
"name",
",",
"team_name",
",",
"profile",
"=",
"\"github\"",
")",
":",
"return",
"name",
".",
"lower",
"(",
")",
"in",
"list_team_members",
"(",
"team_name",
",",
"profile",
"=",
"profile",
")"
] | https://github.com/saltstack/salt/blob/fae5bc757ad0f1716483ce7ae180b451545c2058/salt/modules/github.py#L1495-L1517 |
|
Tencent/bk-sops | 2a6bd1573b7b42812cb8a5b00929e98ab916b18d | gcloud/template_base/domains/template_manager.py | python | TemplateManager.update | (
self, template: object, editor: str, name: str = "", pipeline_tree: str = None, description: str = "",
) | return data | Update a template-layer template
:param template: a template object
:type template: object
:param editor: the editor
:type editor: str
:param name: template name, defaults to ""
:type name: str, optional
:param pipeline_tree: template structure, defaults to None
:type pipeline_tree: str, optional
:param description: template description, defaults to ""
:type description: str, optional
:return: [description]
:rtype: dict | Update a template-layer template | [
"Update",
"a",
"template-layer",
"template"
] | def update(
self, template: object, editor: str, name: str = "", pipeline_tree: str = None, description: str = "",
) -> dict:
"""
Update a template-layer template
:param template: a template object
:type template: object
:param editor: the editor
:type editor: str
:param name: template name, defaults to ""
:type name: str, optional
:param pipeline_tree: template structure, defaults to None
:type pipeline_tree: str, optional
:param description: template description, defaults to ""
:type description: str, optional
:return: [description]
:rtype: dict
"""
data = self.update_pipeline(
pipeline_template=template.pipeline_template,
editor=editor,
name=name,
pipeline_tree=pipeline_tree,
description=description,
)
if not data["result"]:
return data
data["data"] = template
return data | [
"def",
"update",
"(",
"self",
",",
"template",
":",
"object",
",",
"editor",
":",
"str",
",",
"name",
":",
"str",
"=",
"\"\"",
",",
"pipeline_tree",
":",
"str",
"=",
"None",
",",
"description",
":",
"str",
"=",
"\"\"",
",",
")",
"->",
"dict",
":",
"data",
"=",
"self",
".",
"update_pipeline",
"(",
"pipeline_template",
"=",
"template",
".",
"pipeline_template",
",",
"editor",
"=",
"editor",
",",
"name",
"=",
"name",
",",
"pipeline_tree",
"=",
"pipeline_tree",
",",
"description",
"=",
"description",
",",
")",
"if",
"not",
"data",
"[",
"\"result\"",
"]",
":",
"return",
"data",
"data",
"[",
"\"data\"",
"]",
"=",
"template",
"return",
"data"
] | https://github.com/Tencent/bk-sops/blob/2a6bd1573b7b42812cb8a5b00929e98ab916b18d/gcloud/template_base/domains/template_manager.py#L200-L230 |
|
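A hedged usage sketch of update above; TemplateManager construction and the template object are app-specific and not shown in this record, so those pieces are placeholders:

result = manager.update(              # manager: a TemplateManager instance
    template=tmpl,                    # must expose .pipeline_template
    editor="admin",
    name="renamed flow",
    pipeline_tree=new_tree,           # None keeps the current structure
    description="updated via API",
)
if result["result"]:
    tmpl = result["data"]             # on success, the same template object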
twke18/Adaptive_Affinity_Fields | 8488aa6ad16022ab4b89fb8626386997559cb951 | network/common/resnet_v1.py | python | resnet_v1_101 | (x,
name,
is_training,
use_global_status,
reuse=False) | return resnet_v1(x,
name=name,
filters=[64,128,256,512],
num_blocks=[3,4,23,3],
strides=[2,1,1,1],
dilations=[None, None, 2, 4],
is_training=is_training,
use_global_status=use_global_status,
reuse=reuse) | Builds ResNet101 v1.
Args:
x: A tensor of size [batch_size, height_in, width_in, channels].
name: The prefix of tensorflow variables defined in this network.
is_training: If the tensorflow variables defined in this layer
would be used for training.
use_global_status: enable/disable use_global_status for batch
normalization. If True, moving mean and moving variance are updated
by exponential decay.
reuse: enable/disable reuse for reusing tensorflow variables. It is
useful for sharing weight parameters across two identical networks.
Returns:
A tensor of size [batch_size, height_out, width_out, channels_out]. | Builds ResNet101 v1. | [
"Builds",
"ResNet101",
"v1",
"."
] | def resnet_v1_101(x,
name,
is_training,
use_global_status,
reuse=False):
"""Builds ResNet101 v1.
Args:
x: A tensor of size [batch_size, height_in, width_in, channels].
name: The prefix of tensorflow variables defined in this network.
is_training: If the tensorflow variables defined in this layer
would be used for training.
use_global_status: enable/disable use_global_status for batch
normalization. If True, moving mean and moving variance are updated
by exponential decay.
reuse: enable/disable reuse for reusing tensorflow variables. It is
useful for sharing weight parameters across two identical networks.
Returns:
A tensor of size [batch_size, height_out, width_out, channels_out].
"""
return resnet_v1(x,
name=name,
filters=[64,128,256,512],
num_blocks=[3,4,23,3],
strides=[2,1,1,1],
dilations=[None, None, 2, 4],
is_training=is_training,
use_global_status=use_global_status,
reuse=reuse) | [
"def",
"resnet_v1_101",
"(",
"x",
",",
"name",
",",
"is_training",
",",
"use_global_status",
",",
"reuse",
"=",
"False",
")",
":",
"return",
"resnet_v1",
"(",
"x",
",",
"name",
"=",
"name",
",",
"filters",
"=",
"[",
"64",
",",
"128",
",",
"256",
",",
"512",
"]",
",",
"num_blocks",
"=",
"[",
"3",
",",
"4",
",",
"23",
",",
"3",
"]",
",",
"strides",
"=",
"[",
"2",
",",
"1",
",",
"1",
",",
"1",
"]",
",",
"dilations",
"=",
"[",
"None",
",",
"None",
",",
"2",
",",
"4",
"]",
",",
"is_training",
"=",
"is_training",
",",
"use_global_status",
"=",
"use_global_status",
",",
"reuse",
"=",
"reuse",
")"
] | https://github.com/twke18/Adaptive_Affinity_Fields/blob/8488aa6ad16022ab4b89fb8626386997559cb951/network/common/resnet_v1.py#L208-L237 |
|
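resnet_v1_101 above builds a dilated ResNet feature extractor; a minimal construction sketch assuming the repo's TensorFlow 1.x graph-mode setting (import path follows this record's file layout, input shape illustrative):

import tensorflow as tf   # TF 1.x API, as used by this repo
from network.common.resnet_v1 import resnet_v1_101

images = tf.placeholder(tf.float32, [None, 512, 512, 3])
feats = resnet_v1_101(images,
                      name='resnet_v1_101',
                      is_training=True,
                      use_global_status=False,
                      reuse=False)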
cloudera/hue | 23f02102d4547c17c32bd5ea0eb24e9eadd657a4 | desktop/core/ext-py/python-oauth2/oauth2/__init__.py | python | Server.build_authenticate_header | (self, realm='') | return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} | Optional support for the authenticate header. | Optional support for the authenticate header. | [
"Optional",
"support",
"for",
"the",
"authenticate",
"header",
"."
] | def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} | [
"def",
"build_authenticate_header",
"(",
"self",
",",
"realm",
"=",
"''",
")",
":",
"return",
"{",
"'WWW-Authenticate'",
":",
"'OAuth realm=\"%s\"'",
"%",
"realm",
"}"
] | https://github.com/cloudera/hue/blob/23f02102d4547c17c32bd5ea0eb24e9eadd657a4/desktop/core/ext-py/python-oauth2/oauth2/__init__.py#L713-L715 |
|
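The return value above is just a one-entry header dict; assuming the package is importable as oauth2:

import oauth2

server = oauth2.Server()
print(server.build_authenticate_header(realm='photos'))
# {'WWW-Authenticate': 'OAuth realm="photos"'}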
Yelp/paasta | 6c08c04a577359509575c794b973ea84d72accf9 | paasta_tools/utils.py | python | InstanceConfig.get_cpu_quota | (self) | return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period() | Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag | Gets the --cpu-quota option to be passed to docker | [
"Gets",
"the",
"--",
"cpu",
"-",
"quota",
"option",
"to",
"be",
"passed",
"to",
"docker"
] | def get_cpu_quota(self) -> float:
"""Gets the --cpu-quota option to be passed to docker
Calculation: (cpus + cpus_burst_add) * cfs_period_us
:returns: The number to be passed to the --cpu-quota docker flag"""
cpu_burst_add = self.get_cpu_burst_add()
return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period() | [
"def",
"get_cpu_quota",
"(",
"self",
")",
"->",
"float",
":",
"cpu_burst_add",
"=",
"self",
".",
"get_cpu_burst_add",
"(",
")",
"return",
"(",
"self",
".",
"get_cpus",
"(",
")",
"+",
"cpu_burst_add",
")",
"*",
"self",
".",
"get_cpu_period",
"(",
")"
] | https://github.com/Yelp/paasta/blob/6c08c04a577359509575c794b973ea84d72accf9/paasta_tools/utils.py#L468-L475 |
|
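Plugging illustrative numbers into the docstring's formula (paasta actually derives the burst and period from service config, so these constants are assumptions):

cpus = 0.5
cpu_burst_add = 1.0
cfs_period_us = 100000                    # common Linux CFS default period
cpu_quota = (cpus + cpu_burst_add) * cfs_period_us
assert cpu_quota == 150000.0              # --cpu-quota 150000 => up to 1.5 CPUs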
dominno/django-moderation | 6909b01dbdeee5cf40bf399394da0ceadf9dd702 | moderation/diff.py | python | get_diff_operations | (a, b) | return operations | [] | def get_diff_operations(a, b):
operations = []
a_words = re.split(r'(\W+)', a)
b_words = re.split(r'(\W+)', b)
sequence_matcher = difflib.SequenceMatcher(None, a_words, b_words)
for opcode in sequence_matcher.get_opcodes():
operation, start_a, end_a, start_b, end_b = opcode
deleted = ''.join(a_words[start_a:end_a])
inserted = ''.join(b_words[start_b:end_b])
operations.append({'operation': operation,
'deleted': deleted,
'inserted': inserted})
return operations | [
"def",
"get_diff_operations",
"(",
"a",
",",
"b",
")",
":",
"operations",
"=",
"[",
"]",
"a_words",
"=",
"re",
".",
"split",
"(",
"r'(\\W+)'",
",",
"a",
")",
"b_words",
"=",
"re",
".",
"split",
"(",
"r'(\\W+)'",
",",
"b",
")",
"sequence_matcher",
"=",
"difflib",
".",
"SequenceMatcher",
"(",
"None",
",",
"a_words",
",",
"b_words",
")",
"for",
"opcode",
"in",
"sequence_matcher",
".",
"get_opcodes",
"(",
")",
":",
"operation",
",",
"start_a",
",",
"end_a",
",",
"start_b",
",",
"end_b",
"=",
"opcode",
"deleted",
"=",
"''",
".",
"join",
"(",
"a_words",
"[",
"start_a",
":",
"end_a",
"]",
")",
"inserted",
"=",
"''",
".",
"join",
"(",
"b_words",
"[",
"start_b",
":",
"end_b",
"]",
")",
"operations",
".",
"append",
"(",
"{",
"'operation'",
":",
"operation",
",",
"'deleted'",
":",
"deleted",
",",
"'inserted'",
":",
"inserted",
"}",
")",
"return",
"operations"
] | https://github.com/dominno/django-moderation/blob/6909b01dbdeee5cf40bf399394da0ceadf9dd702/moderation/diff.py#L99-L113 |
|||
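A quick run of get_diff_operations above on two short strings; 'equal' spans repeat the same text on both sides while 'replace' spans carry the differing words (import path per this record):

from moderation.diff import get_diff_operations

ops = get_diff_operations("the quick fox", "the slow fox")
for op in ops:
    print(op['operation'], repr(op['deleted']), '->', repr(op['inserted']))
# equal 'the ' -> 'the '
# replace 'quick' -> 'slow'
# equal ' fox' -> ' fox'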
LexPredict/lexpredict-contraxsuite | 1d5a2540d31f8f3f1adc442cfa13a7c007319899 | sdk/python/sdk/openapi_client/model/document_type_import_response.py | python | DocumentTypeImportResponse.__init__ | (self, task_id, *args, **kwargs) | DocumentTypeImportResponse - a model defined in OpenAPI
Args:
task_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,) | DocumentTypeImportResponse - a model defined in OpenAPI | [
"DocumentTypeImportResponse",
"-",
"a",
"model",
"defined",
"in",
"OpenAPI"
] | def __init__(self, task_id, *args, **kwargs): # noqa: E501
"""DocumentTypeImportResponse - a model defined in OpenAPI
Args:
task_id (str):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.task_id = task_id
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | [
"def",
"__init__",
"(",
"self",
",",
"task_id",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# noqa: E501",
"_check_type",
"=",
"kwargs",
".",
"pop",
"(",
"'_check_type'",
",",
"True",
")",
"_spec_property_naming",
"=",
"kwargs",
".",
"pop",
"(",
"'_spec_property_naming'",
",",
"False",
")",
"_path_to_item",
"=",
"kwargs",
".",
"pop",
"(",
"'_path_to_item'",
",",
"(",
")",
")",
"_configuration",
"=",
"kwargs",
".",
"pop",
"(",
"'_configuration'",
",",
"None",
")",
"_visited_composed_classes",
"=",
"kwargs",
".",
"pop",
"(",
"'_visited_composed_classes'",
",",
"(",
")",
")",
"if",
"args",
":",
"raise",
"ApiTypeError",
"(",
"\"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.\"",
"%",
"(",
"args",
",",
"self",
".",
"__class__",
".",
"__name__",
",",
")",
",",
"path_to_item",
"=",
"_path_to_item",
",",
"valid_classes",
"=",
"(",
"self",
".",
"__class__",
",",
")",
",",
")",
"self",
".",
"_data_store",
"=",
"{",
"}",
"self",
".",
"_check_type",
"=",
"_check_type",
"self",
".",
"_spec_property_naming",
"=",
"_spec_property_naming",
"self",
".",
"_path_to_item",
"=",
"_path_to_item",
"self",
".",
"_configuration",
"=",
"_configuration",
"self",
".",
"_visited_composed_classes",
"=",
"_visited_composed_classes",
"+",
"(",
"self",
".",
"__class__",
",",
")",
"self",
".",
"task_id",
"=",
"task_id",
"for",
"var_name",
",",
"var_value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"var_name",
"not",
"in",
"self",
".",
"attribute_map",
"and",
"self",
".",
"_configuration",
"is",
"not",
"None",
"and",
"self",
".",
"_configuration",
".",
"discard_unknown_keys",
"and",
"self",
".",
"additional_properties_type",
"is",
"None",
":",
"# discard variable.",
"continue",
"setattr",
"(",
"self",
",",
"var_name",
",",
"var_value",
")",
"if",
"var_name",
"in",
"self",
".",
"read_only_vars",
":",
"raise",
"ApiAttributeError",
"(",
"f\"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate \"",
"f\"class with read only attributes.\"",
")"
] | https://github.com/LexPredict/lexpredict-contraxsuite/blob/1d5a2540d31f8f3f1adc442cfa13a7c007319899/sdk/python/sdk/openapi_client/model/document_type_import_response.py#L188-L261 |
||
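Since positional extras raise ApiTypeError above, the generated model is meant to be built from task_id plus keyword arguments only; a sketch (module path inferred from this record's file path, task id illustrative):

from openapi_client.model.document_type_import_response import (
    DocumentTypeImportResponse,
)

resp = DocumentTypeImportResponse(task_id="3fa85f64-5717-4562-b3fc-2c963f66afa6")
print(resp.task_id)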
bastula/dicompyler | 2643e0ee145cb7c699b3d36e3e4f07ac9dc7b1f2 | dicompyler/baseplugins/2dview.py | python | plugin2DView.OnDestroy | (self, evt) | | Unbind from all events before the plugin is destroyed. | Unbind from all events before the plugin is destroyed. | [
"Unbind",
"to",
"all",
"events",
"before",
"the",
"plugin",
"is",
"destroyed",
"."
] | def OnDestroy(self, evt):
"""Unbind to all events before the plugin is destroyed."""
pub.unsubscribe(self.OnUpdatePatient, 'patient.updated.parsed_data')
pub.unsubscribe(self.OnStructureCheck, 'structures.checked')
pub.unsubscribe(self.OnIsodoseCheck, 'isodoses.checked')
pub.unsubscribe(self.OnDrawingPrefsChange, '2dview.drawingprefs')
pub.unsubscribe(self.OnPluginLoaded, 'plugin.loaded.2dview') | [
"def",
"OnDestroy",
"(",
"self",
",",
"evt",
")",
":",
"pub",
".",
"unsubscribe",
"(",
"self",
".",
"OnUpdatePatient",
",",
"'patient.updated.parsed_data'",
")",
"pub",
".",
"unsubscribe",
"(",
"self",
".",
"OnStructureCheck",
",",
"'structures.checked'",
")",
"pub",
".",
"unsubscribe",
"(",
"self",
".",
"OnIsodoseCheck",
",",
"'isodoses.checked'",
")",
"pub",
".",
"unsubscribe",
"(",
"self",
".",
"OnDrawingPrefsChange",
",",
"'2dview.drawingprefs'",
")",
"pub",
".",
"unsubscribe",
"(",
"self",
".",
"OnPluginLoaded",
",",
"'plugin.loaded.2dview'",
")"
] | https://github.com/bastula/dicompyler/blob/2643e0ee145cb7c699b3d36e3e4f07ac9dc7b1f2/dicompyler/baseplugins/2dview.py#L200-L207 |
||
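Each pub.unsubscribe above must mirror a pub.subscribe made earlier in the plugin's lifetime; a minimal pairing sketch outside of wx, using pypubsub's modern import path and one topic name taken from the record:

from pubsub import pub

class Panel:
    def __init__(self):
        # Every subscribe here needs the matching unsubscribe in destroy().
        pub.subscribe(self.on_patient, 'patient.updated.parsed_data')

    def on_patient(self, msg=None):
        print('patient updated:', msg)

    def destroy(self):
        pub.unsubscribe(self.on_patient, 'patient.updated.parsed_data')

p = Panel()
pub.sendMessage('patient.updated.parsed_data', msg={'id': 1})
p.destroy()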
omz/PythonistaAppTemplate | f560f93f8876d82a21d108977f90583df08d55af | PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/engine/interfaces.py | python | ExecutionContext.handle_dbapi_exception | (self, e) | Receive a DBAPI exception which occurred upon execute, result
fetch, etc. | Receive a DBAPI exception which occurred upon execute, result
fetch, etc. | [
"Receive",
"a",
"DBAPI",
"exception",
"which",
"occurred",
"upon",
"execute",
"result",
"fetch",
"etc",
"."
] | def handle_dbapi_exception(self, e):
"""Receive a DBAPI exception which occurred upon execute, result
fetch, etc."""
raise NotImplementedError() | [
"def",
"handle_dbapi_exception",
"(",
"self",
",",
"e",
")",
":",
"raise",
"NotImplementedError",
"(",
")"
] | https://github.com/omz/PythonistaAppTemplate/blob/f560f93f8876d82a21d108977f90583df08d55af/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/sqlalchemy/engine/interfaces.py#L787-L791 |
||
osmr/imgclsmob | f2993d3ce73a2f7ddba05da3891defb08547d504 | chainer_/chainercv2/models/irevnet.py | python | get_irevnet | (blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs) | return net | Create i-RevNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters. | Create i-RevNet model with specific parameters. | [
"Create",
"i",
"-",
"RevNet",
"model",
"with",
"specific",
"parameters",
"."
] | def get_irevnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join("~", ".chainer", "models"),
**kwargs):
"""
Create i-RevNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
if blocks == 301:
layers = [6, 16, 72, 6]
else:
raise ValueError("Unsupported i-RevNet with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 1 == blocks)
channels_per_layers = [24, 96, 384, 1536]
init_block_channels = 12
final_block_channels = 3072
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = IRevNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net | [
"def",
"get_irevnet",
"(",
"blocks",
",",
"model_name",
"=",
"None",
",",
"pretrained",
"=",
"False",
",",
"root",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"~\"",
",",
"\".chainer\"",
",",
"\"models\"",
")",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"blocks",
"==",
"301",
":",
"layers",
"=",
"[",
"6",
",",
"16",
",",
"72",
",",
"6",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unsupported i-RevNet with number of blocks: {}\"",
".",
"format",
"(",
"blocks",
")",
")",
"assert",
"(",
"sum",
"(",
"layers",
")",
"*",
"3",
"+",
"1",
"==",
"blocks",
")",
"channels_per_layers",
"=",
"[",
"24",
",",
"96",
",",
"384",
",",
"1536",
"]",
"init_block_channels",
"=",
"12",
"final_block_channels",
"=",
"3072",
"channels",
"=",
"[",
"[",
"ci",
"]",
"*",
"li",
"for",
"(",
"ci",
",",
"li",
")",
"in",
"zip",
"(",
"channels_per_layers",
",",
"layers",
")",
"]",
"net",
"=",
"IRevNet",
"(",
"channels",
"=",
"channels",
",",
"init_block_channels",
"=",
"init_block_channels",
",",
"final_block_channels",
"=",
"final_block_channels",
",",
"*",
"*",
"kwargs",
")",
"if",
"pretrained",
":",
"if",
"(",
"model_name",
"is",
"None",
")",
"or",
"(",
"not",
"model_name",
")",
":",
"raise",
"ValueError",
"(",
"\"Parameter `model_name` should be properly initialized for loading pretrained model.\"",
")",
"from",
".",
"model_store",
"import",
"get_model_file",
"load_npz",
"(",
"file",
"=",
"get_model_file",
"(",
"model_name",
"=",
"model_name",
",",
"local_model_store_dir_path",
"=",
"root",
")",
",",
"obj",
"=",
"net",
")",
"return",
"net"
] | https://github.com/osmr/imgclsmob/blob/f2993d3ce73a2f7ddba05da3891defb08547d504/chainer_/chainercv2/models/irevnet.py#L387-L436 |
|
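A construction sketch for get_irevnet above; the import path follows this record's file layout and the 224x224 input is the usual ImageNet assumption:

import numpy as np
from chainercv2.models.irevnet import get_irevnet

net = get_irevnet(blocks=301, model_name="irevnet301", pretrained=False)
x = np.zeros((1, 3, 224, 224), dtype=np.float32)   # Chainer is NCHW
y = net(x)   # forward pass; output layout depends on the model's flags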
radlab/sparrow | afb8efadeb88524f1394d1abe4ea66c6fd2ac744 | simulation/simulation_hacked.py | python | StatsManager.output_load_versus_launch_time | (self) | Outputs the predicted load and launch time for each task.
This information is intended to help evaluate the staleness of the
load information from the probe. If the information is quite stale,
we'd expect to see little correlation between the load and the launch
time of the task. | Outputs the predicted load and launch time for each task.
This information is intended to help evaluate the staleness of the
load information from the probe. If the information is quite stale,
we'd expect to see little correlation between the load and the launch
time of the task. | [
"Outputs",
"the",
"predicted",
"load",
"and",
"launch",
"time",
"for",
"each",
"task",
".",
"This",
"information",
"is",
"intended",
"to",
"help",
"evaluate",
"the",
"staleness",
"of",
"the",
"load",
"information",
"from",
"the",
"probe",
".",
"If",
"the",
"information",
"is",
"quite",
"stale",
"we",
"d",
"expect",
"to",
"see",
"little",
"correlation",
"between",
"the",
"load",
"and",
"the",
"launch",
"time",
"of",
"the",
"task",
"."
] | def output_load_versus_launch_time(self):
""" Outputs the predicted load and launch time for each task.
This information is intended to help evaluate the staleness of the
load information from the probe. If the information is quite stale,
we'd expect to see little correlation between the load and the launch
time of the task.
"""
results_dirname = get_param("results_dir")
per_task_filename = os.path.join(results_dirname,
"%s_task_load_vs_wait" %
get_param("file_prefix"))
per_task_file = open(per_task_filename, "w")
per_task_file.write("load\twait_time\n")
per_job_filename = os.path.join(results_dirname,
"%s_job_load_vs_wait" %
get_param("file_prefix"))
per_job_file = open(per_job_filename, "w")
per_job_file.write("load\twait_time\n")
for job in self.completed_jobs:
# Launch time and expected load for the last task to launch.
longest_task_wait = -1
longest_task_load = -1
for task_id in range(job.num_tasks):
load = job.probe_results[task_id]
wait = job.wait_times[task_id]
if wait > longest_task_wait:
longest_task_wait = wait
longest_task_load = load
per_task_file.write("%f\t%f\n" % (load, wait))
per_job_file.write("%f\t%f\n" % (longest_task_load,
longest_task_wait))
per_job_file.close()
per_task_file.close() | [
"def",
"output_load_versus_launch_time",
"(",
"self",
")",
":",
"results_dirname",
"=",
"get_param",
"(",
"\"results_dir\"",
")",
"per_task_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"results_dirname",
",",
"\"%s_task_load_vs_wait\"",
"%",
"get_param",
"(",
"\"file_prefix\"",
")",
")",
"per_task_file",
"=",
"open",
"(",
"per_task_filename",
",",
"\"w\"",
")",
"per_task_file",
".",
"write",
"(",
"\"load\\twait_time\\n\"",
")",
"per_job_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"results_dirname",
",",
"\"%s_job_load_vs_wait\"",
"%",
"get_param",
"(",
"\"file_prefix\"",
")",
")",
"per_job_file",
"=",
"open",
"(",
"per_job_filename",
",",
"\"w\"",
")",
"per_job_file",
".",
"write",
"(",
"\"load\\twait_time\\n\"",
")",
"for",
"job",
"in",
"self",
".",
"completed_jobs",
":",
"# Launch time and expected load for the last task to launch.",
"longest_task_wait",
"=",
"-",
"1",
"longest_task_load",
"=",
"-",
"1",
"for",
"task_id",
"in",
"range",
"(",
"job",
".",
"num_tasks",
")",
":",
"load",
"=",
"job",
".",
"probe_results",
"[",
"task_id",
"]",
"wait",
"=",
"job",
".",
"wait_times",
"[",
"task_id",
"]",
"if",
"wait",
">",
"longest_task_wait",
":",
"longest_task_wait",
"=",
"wait",
"longest_task_load",
"=",
"load",
"per_task_file",
".",
"write",
"(",
"\"%f\\t%f\\n\"",
"%",
"(",
"load",
",",
"wait",
")",
")",
"per_job_file",
".",
"write",
"(",
"\"%f\\t%f\\n\"",
"%",
"(",
"longest_task_load",
",",
"longest_task_wait",
")",
")",
"per_job_file",
".",
"close",
"(",
")",
"per_task_file",
".",
"close",
"(",
")"
] | https://github.com/radlab/sparrow/blob/afb8efadeb88524f1394d1abe4ea66c6fd2ac744/simulation/simulation_hacked.py#L805-L840 |
||
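The staleness question in the docstring above can be answered straight from the per-task file this method writes; a sketch assuming an illustrative results path and file prefix:

import numpy as np

# Columns match the header written above: load \t wait_time
load, wait = np.loadtxt('results/sim_task_load_vs_wait',
                        delimiter='\t', skiprows=1, unpack=True)
# Near-zero correlation would suggest the probed load was stale.
print('pearson r =', np.corrcoef(load, wait)[0, 1])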
pyparsing/pyparsing | 1ccf846394a055924b810faaf9628dac53633848 | examples/pymicko.py | python | SymbolTable.insert_function | (self, fname, ftype) | return index | Inserts a new function | Inserts a new function | [
"Inserts",
"a",
"new",
"function"
] | def insert_function(self, fname, ftype):
"Inserts a new function"
index = self.insert_id(
fname,
SharedData.KINDS.FUNCTION,
[SharedData.KINDS.GLOBAL_VAR, SharedData.KINDS.FUNCTION],
ftype,
)
self.table[index].set_attribute("Params", 0)
return index | [
"def",
"insert_function",
"(",
"self",
",",
"fname",
",",
"ftype",
")",
":",
"index",
"=",
"self",
".",
"insert_id",
"(",
"fname",
",",
"SharedData",
".",
"KINDS",
".",
"FUNCTION",
",",
"[",
"SharedData",
".",
"KINDS",
".",
"GLOBAL_VAR",
",",
"SharedData",
".",
"KINDS",
".",
"FUNCTION",
"]",
",",
"ftype",
",",
")",
"self",
".",
"table",
"[",
"index",
"]",
".",
"set_attribute",
"(",
"\"Params\"",
",",
"0",
")",
"return",
"index"
] | https://github.com/pyparsing/pyparsing/blob/1ccf846394a055924b810faaf9628dac53633848/examples/pymicko.py#L523-L532 |
|
ACCLAB/DABEST-python | 3ac87685a6c0859f731e9c9107bef8f32e39a61d | dabest/_classes.py | python | TwoGroupsEffectSize.__init__ | (self, control, test, effect_size,
is_paired=False, ci=95,
resamples=5000,
permutation_count=5000,
random_seed=12345) | Compute the effect size between two groups.
Parameters
----------
control : array-like
test : array-like
These should be numerical iterables.
effect_size : string.
Any one of the following are accepted inputs:
'mean_diff', 'median_diff', 'cohens_d', 'hedges_g', or 'cliffs_delta'
is_paired : boolean, default False
resamples : int, default 5000
The number of bootstrap resamples to be taken for the calculation
of the confidence interval limits.
permutation_count : int, default 5000
The number of permutations (reshuffles) to perform for the
computation of the permutation p-value
ci : float, default 95
The confidence interval width. The default of 95 produces 95%
confidence intervals.
random_seed : int, default 12345
`random_seed` is used to seed the random number generator during
bootstrap resampling. This ensures that the confidence intervals
reported are replicable.
Returns
-------
A :py:class:`TwoGroupsEffectSize` object.
difference : float
The effect size of the difference between the control and the test.
effect_size : string
The type of effect size reported.
is_paired : boolean
Whether or not the difference is paired (ie. repeated measures).
ci : float
Returns the width of the confidence interval, in percent.
alpha : float
Returns the significance level of the statistical test as a float
between 0 and 1.
resamples : int
The number of resamples performed during the bootstrap procedure.
bootstraps : numpy ndarray
The generated bootstraps of the effect size.
random_seed : int
The number used to initialise the numpy random seed generator, ie.
`seed_value` from `numpy.random.seed(seed_value)` is returned.
bca_low, bca_high : float
The bias-corrected and accelerated confidence interval lower limit
and upper limits, respectively.
pct_low, pct_high : float
The percentile confidence interval lower limit and upper limits,
respectively.
Examples
--------
>>> import numpy as np
>>> import scipy as sp
>>> import dabest
>>> np.random.seed(12345)
>>> control = sp.stats.norm.rvs(loc=0, size=30)
>>> test = sp.stats.norm.rvs(loc=0.5, size=30)
>>> effsize = dabest.TwoGroupsEffectSize(control, test, "mean_diff")
>>> effsize
The unpaired mean difference is -0.253 [95%CI -0.782, 0.241]
5000 bootstrap samples. The confidence interval is bias-corrected
and accelerated.
>>> effsize.to_dict()
{'alpha': 0.05,
'bca_high': 0.24951887238295106,
'bca_interval_idx': (125, 4875),
'bca_low': -0.7801782111071534,
'bootstraps': array([-1.25579022, -1.20979484, -1.17604415, ..., 0.57700183,
0.5902485 , 0.61043212]),
'ci': 95,
'difference': -0.25315417702752846,
'effect_size': 'mean difference',
'is_paired': False,
'pct_high': 0.24951887238295106,
'pct_interval_idx': (125, 4875),
'pct_low': -0.7801782111071534,
'permutation_count': 5000,
'pvalue_brunner_munzel': nan,
'pvalue_kruskal': nan,
'pvalue_mann_whitney': 0.5201446121616038,
'pvalue_paired_students_t': nan,
'pvalue_permutation': 0.3484,
'pvalue_students_t': 0.34743913903372836,
'pvalue_welch': 0.3474493875548965,
'pvalue_wilcoxon': nan,
'random_seed': 12345,
'resamples': 5000,
'statistic_brunner_munzel': nan,
'statistic_kruskal': nan,
'statistic_mann_whitney': 494.0,
'statistic_paired_students_t': nan,
'statistic_students_t': 0.9472545159069105,
'statistic_welch': 0.9472545159069105,
'statistic_wilcoxon': nan} | Compute the effect size between two groups. | [
"Compute",
"the",
"effect",
"size",
"between",
"two",
"groups",
"."
] | def __init__(self, control, test, effect_size,
is_paired=False, ci=95,
resamples=5000,
permutation_count=5000,
random_seed=12345):
"""
Compute the effect size between two groups.
Parameters
----------
control : array-like
test : array-like
These should be numerical iterables.
effect_size : string.
Any one of the following are accepted inputs:
'mean_diff', 'median_diff', 'cohens_d', 'hedges_g', or 'cliffs_delta'
is_paired : boolean, default False
resamples : int, default 5000
The number of bootstrap resamples to be taken for the calculation
of the confidence interval limits.
permutation_count : int, default 5000
The number of permutations (reshuffles) to perform for the
computation of the permutation p-value
ci : float, default 95
The confidence interval width. The default of 95 produces 95%
confidence intervals.
random_seed : int, default 12345
`random_seed` is used to seed the random number generator during
bootstrap resampling. This ensures that the confidence intervals
reported are replicable.
Returns
-------
A :py:class:`TwoGroupsEffectSize` object.
difference : float
The effect size of the difference between the control and the test.
effect_size : string
The type of effect size reported.
is_paired : boolean
Whether or not the difference is paired (ie. repeated measures).
ci : float
Returns the width of the confidence interval, in percent.
alpha : float
Returns the significance level of the statistical test as a float
between 0 and 1.
resamples : int
The number of resamples performed during the bootstrap procedure.
bootstraps : numpy ndarray
The generated bootstraps of the effect size.
random_seed : int
The number used to initialise the numpy random seed generator, ie.
`seed_value` from `numpy.random.seed(seed_value)` is returned.
bca_low, bca_high : float
The bias-corrected and accelerated confidence interval lower limit
and upper limits, respectively.
pct_low, pct_high : float
The percentile confidence interval lower limit and upper limits,
respectively.
Examples
--------
>>> import numpy as np
>>> import scipy as sp
>>> import dabest
>>> np.random.seed(12345)
>>> control = sp.stats.norm.rvs(loc=0, size=30)
>>> test = sp.stats.norm.rvs(loc=0.5, size=30)
>>> effsize = dabest.TwoGroupsEffectSize(control, test, "mean_diff")
>>> effsize
The unpaired mean difference is -0.253 [95%CI -0.782, 0.241]
5000 bootstrap samples. The confidence interval is bias-corrected
and accelerated.
>>> effsize.to_dict()
{'alpha': 0.05,
'bca_high': 0.24951887238295106,
'bca_interval_idx': (125, 4875),
'bca_low': -0.7801782111071534,
'bootstraps': array([-1.25579022, -1.20979484, -1.17604415, ..., 0.57700183,
0.5902485 , 0.61043212]),
'ci': 95,
'difference': -0.25315417702752846,
'effect_size': 'mean difference',
'is_paired': False,
'pct_high': 0.24951887238295106,
'pct_interval_idx': (125, 4875),
'pct_low': -0.7801782111071534,
'permutation_count': 5000,
'pvalue_brunner_munzel': nan,
'pvalue_kruskal': nan,
'pvalue_mann_whitney': 0.5201446121616038,
'pvalue_paired_students_t': nan,
'pvalue_permutation': 0.3484,
'pvalue_students_t': 0.34743913903372836,
'pvalue_welch': 0.3474493875548965,
'pvalue_wilcoxon': nan,
'random_seed': 12345,
'resamples': 5000,
'statistic_brunner_munzel': nan,
'statistic_kruskal': nan,
'statistic_mann_whitney': 494.0,
'statistic_paired_students_t': nan,
'statistic_students_t': 0.9472545159069105,
'statistic_welch': 0.9472545159069105,
'statistic_wilcoxon': nan}
"""
import numpy as np
from numpy import array, isnan, isinf
from numpy import sort as npsort
from numpy.random import choice, seed
import scipy.stats as spstats
# import statsmodels.stats.power as power
from string import Template
import warnings
from ._stats_tools import confint_2group_diff as ci2g
from ._stats_tools import effsize as es
self.__EFFECT_SIZE_DICT = {"mean_diff" : "mean difference",
"median_diff" : "median difference",
"cohens_d" : "Cohen's d",
"hedges_g" : "Hedges' g",
"cliffs_delta" : "Cliff's delta"}
kosher_es = [a for a in self.__EFFECT_SIZE_DICT.keys()]
if effect_size not in kosher_es:
err1 = "The effect size '{}'".format(effect_size)
err2 = "is not one of {}".format(kosher_es)
raise ValueError(" ".join([err1, err2]))
if effect_size == "cliffs_delta" and is_paired is True:
err1 = "`paired` is True; therefore Cliff's delta is not defined."
raise ValueError(err1)
# Convert to numpy arrays for speed.
# NaNs are automatically dropped.
control = array(control)
test = array(test)
control = control[~isnan(control)]
test = test[~isnan(test)]
self.__effect_size = effect_size
self.__control = control
self.__test = test
self.__is_paired = is_paired
self.__resamples = resamples
self.__permutation_count = permutation_count
self.__random_seed = random_seed
self.__ci = ci
self.__alpha = ci2g._compute_alpha_from_ci(ci)
self.__difference = es.two_group_difference(
control, test, is_paired, effect_size)
self.__jackknives = ci2g.compute_meandiff_jackknife(
control, test, is_paired, effect_size)
self.__acceleration_value = ci2g._calc_accel(self.__jackknives)
bootstraps = ci2g.compute_bootstrapped_diff(
control, test, is_paired, effect_size,
resamples, random_seed)
self.__bootstraps = npsort(bootstraps)
# Added in v0.2.6.
# Raises a UserWarning if there are any infinities in the bootstraps.
num_infinities = len(self.__bootstraps[isinf(self.__bootstraps)])
if num_infinities > 0:
warn_msg = "There are {} bootstrap(s) that are not defined. "\
"This is likely due to smaple sample sizes. "\
"The values in a bootstrap for a group will be more likely "\
"to be all equal, with a resulting variance of zero. "\
"The computation of Cohen's d and Hedges' g thus "\
"involved a division by zero. "
warnings.warn(warn_msg.format(num_infinities),
category=UserWarning)
self.__bias_correction = ci2g.compute_meandiff_bias_correction(
self.__bootstraps, self.__difference)
# Compute BCa intervals.
bca_idx_low, bca_idx_high = ci2g.compute_interval_limits(
self.__bias_correction, self.__acceleration_value,
self.__resamples, ci)
self.__bca_interval_idx = (bca_idx_low, bca_idx_high)
if ~isnan(bca_idx_low) and ~isnan(bca_idx_high):
self.__bca_low = self.__bootstraps[bca_idx_low]
self.__bca_high = self.__bootstraps[bca_idx_high]
err1 = "The $lim_type limit of the interval"
err2 = "was in the $loc 10 values."
err3 = "The result should be considered unstable."
err_temp = Template(" ".join([err1, err2, err3]))
if bca_idx_low <= 10:
warnings.warn(err_temp.substitute(lim_type="lower",
loc="bottom"),
stacklevel=1)
if bca_idx_high >= resamples-9:
warnings.warn(err_temp.substitute(lim_type="upper",
loc="top"),
stacklevel=1)
else:
err1 = "The $lim_type limit of the BCa interval cannot be computed."
err2 = "It is set to the effect size itself."
err3 = "All bootstrap values were likely all the same."
err_temp = Template(" ".join([err1, err2, err3]))
if isnan(bca_idx_low):
self.__bca_low = self.__difference
warnings.warn(err_temp.substitute(lim_type="lower"),
stacklevel=0)
if isnan(bca_idx_high):
self.__bca_high = self.__difference
warnings.warn(err_temp.substitute(lim_type="upper"),
stacklevel=0)
# Compute percentile intervals.
pct_idx_low = int((self.__alpha/2) * resamples)
pct_idx_high = int((1-(self.__alpha/2)) * resamples)
self.__pct_interval_idx = (pct_idx_low, pct_idx_high)
self.__pct_low = self.__bootstraps[pct_idx_low]
self.__pct_high = self.__bootstraps[pct_idx_high]
# Perform statistical tests.
self.__PermutationTest_result = PermutationTest(control, test,
effect_size,
is_paired,
permutation_count)
if is_paired is True:
# Wilcoxon, a non-parametric version of the paired T-test.
wilcoxon = spstats.wilcoxon(control, test)
self.__pvalue_wilcoxon = wilcoxon.pvalue
self.__statistic_wilcoxon = wilcoxon.statistic
# Introduced in v0.2.8, removed in v0.3.0 for performance issues.
# lqrt_result = lqrt.lqrtest_rel(control, test,
# random_state=random_seed)
# self.__pvalue_paired_lqrt = lqrt_result.pvalue
# self.__statistic_paired_lqrt = lqrt_result.statistic
if effect_size != "median_diff":
# Paired Student's t-test.
paired_t = spstats.ttest_rel(control, test, nan_policy='omit')
self.__pvalue_paired_students_t = paired_t.pvalue
self.__statistic_paired_students_t = paired_t.statistic
standardized_es = es.cohens_d(control, test, is_paired=True)
# self.__power = power.tt_solve_power(standardized_es,
# len(control),
# alpha=self.__alpha)
elif effect_size == "cliffs_delta":
# Let's go with Brunner-Munzel!
brunner_munzel = spstats.brunnermunzel(control, test,
nan_policy='omit')
self.__pvalue_brunner_munzel = brunner_munzel.pvalue
self.__statistic_brunner_munzel = brunner_munzel.statistic
elif effect_size == "median_diff":
# According to scipy's documentation of the function,
# "The Kruskal-Wallis H-test tests the null hypothesis
# that the population median of all of the groups are equal."
kruskal = spstats.kruskal(control, test, nan_policy='omit')
self.__pvalue_kruskal = kruskal.pvalue
self.__statistic_kruskal = kruskal.statistic
# self.__power = np.nan
else: # for mean difference, Cohen's d, and Hedges' g.
# Welch's t-test, assumes normality of distributions,
# but does not assume equal variances.
welch = spstats.ttest_ind(control, test, equal_var=False,
nan_policy='omit')
self.__pvalue_welch = welch.pvalue
self.__statistic_welch = welch.statistic
# Student's t-test, assumes normality of distributions,
# as well as assumption of equal variances.
students_t = spstats.ttest_ind(control, test, equal_var=True,
nan_policy='omit')
self.__pvalue_students_t = students_t.pvalue
self.__statistic_students_t = students_t.statistic
# Mann-Whitney test: Non parametric,
# does not assume normality of distributions
try:
mann_whitney = spstats.mannwhitneyu(control, test,
alternative='two-sided')
self.__pvalue_mann_whitney = mann_whitney.pvalue
self.__statistic_mann_whitney = mann_whitney.statistic
except ValueError:
# Occurs when the control and test are exactly identical
# in terms of rank (eg. all zeros.)
pass
# Introduced in v0.2.8, removed in v0.3.0 for performance issues.
# # Likelihood Q-Ratio test:
# lqrt_equal_var_result = lqrt.lqrtest_ind(control, test,
# random_state=random_seed,
# equal_var=True)
# self.__pvalue_lqrt_equal_var = lqrt_equal_var_result.pvalue
# self.__statistic_lqrt_equal_var = lqrt_equal_var_result.statistic
# lqrt_unequal_var_result = lqrt.lqrtest_ind(control, test,
# random_state=random_seed,
# equal_var=False)
# self.__pvalue_lqrt_unequal_var = lqrt_unequal_var_result.pvalue
# self.__statistic_lqrt_unequal_var = lqrt_unequal_var_result.statistic
standardized_es = es.cohens_d(control, test, is_paired=False) | [
"def",
"__init__",
"(",
"self",
",",
"control",
",",
"test",
",",
"effect_size",
",",
"is_paired",
"=",
"False",
",",
"ci",
"=",
"95",
",",
"resamples",
"=",
"5000",
",",
"permutation_count",
"=",
"5000",
",",
"random_seed",
"=",
"12345",
")",
":",
"import",
"numpy",
"as",
"np",
"from",
"numpy",
"import",
"array",
",",
"isnan",
",",
"isinf",
"from",
"numpy",
"import",
"sort",
"as",
"npsort",
"from",
"numpy",
".",
"random",
"import",
"choice",
",",
"seed",
"import",
"scipy",
".",
"stats",
"as",
"spstats",
"# import statsmodels.stats.power as power",
"from",
"string",
"import",
"Template",
"import",
"warnings",
"from",
".",
"_stats_tools",
"import",
"confint_2group_diff",
"as",
"ci2g",
"from",
".",
"_stats_tools",
"import",
"effsize",
"as",
"es",
"self",
".",
"__EFFECT_SIZE_DICT",
"=",
"{",
"\"mean_diff\"",
":",
"\"mean difference\"",
",",
"\"median_diff\"",
":",
"\"median difference\"",
",",
"\"cohens_d\"",
":",
"\"Cohen's d\"",
",",
"\"hedges_g\"",
":",
"\"Hedges' g\"",
",",
"\"cliffs_delta\"",
":",
"\"Cliff's delta\"",
"}",
"kosher_es",
"=",
"[",
"a",
"for",
"a",
"in",
"self",
".",
"__EFFECT_SIZE_DICT",
".",
"keys",
"(",
")",
"]",
"if",
"effect_size",
"not",
"in",
"kosher_es",
":",
"err1",
"=",
"\"The effect size '{}'\"",
".",
"format",
"(",
"effect_size",
")",
"err2",
"=",
"\"is not one of {}\"",
".",
"format",
"(",
"kosher_es",
")",
"raise",
"ValueError",
"(",
"\" \"",
".",
"join",
"(",
"[",
"err1",
",",
"err2",
"]",
")",
")",
"if",
"effect_size",
"==",
"\"cliffs_delta\"",
"and",
"is_paired",
"is",
"True",
":",
"err1",
"=",
"\"`paired` is True; therefore Cliff's delta is not defined.\"",
"raise",
"ValueError",
"(",
"err1",
")",
"# Convert to numpy arrays for speed.",
"# NaNs are automatically dropped.",
"control",
"=",
"array",
"(",
"control",
")",
"test",
"=",
"array",
"(",
"test",
")",
"control",
"=",
"control",
"[",
"~",
"isnan",
"(",
"control",
")",
"]",
"test",
"=",
"test",
"[",
"~",
"isnan",
"(",
"test",
")",
"]",
"self",
".",
"__effect_size",
"=",
"effect_size",
"self",
".",
"__control",
"=",
"control",
"self",
".",
"__test",
"=",
"test",
"self",
".",
"__is_paired",
"=",
"is_paired",
"self",
".",
"__resamples",
"=",
"resamples",
"self",
".",
"__permutation_count",
"=",
"permutation_count",
"self",
".",
"__random_seed",
"=",
"random_seed",
"self",
".",
"__ci",
"=",
"ci",
"self",
".",
"__alpha",
"=",
"ci2g",
".",
"_compute_alpha_from_ci",
"(",
"ci",
")",
"self",
".",
"__difference",
"=",
"es",
".",
"two_group_difference",
"(",
"control",
",",
"test",
",",
"is_paired",
",",
"effect_size",
")",
"self",
".",
"__jackknives",
"=",
"ci2g",
".",
"compute_meandiff_jackknife",
"(",
"control",
",",
"test",
",",
"is_paired",
",",
"effect_size",
")",
"self",
".",
"__acceleration_value",
"=",
"ci2g",
".",
"_calc_accel",
"(",
"self",
".",
"__jackknives",
")",
"bootstraps",
"=",
"ci2g",
".",
"compute_bootstrapped_diff",
"(",
"control",
",",
"test",
",",
"is_paired",
",",
"effect_size",
",",
"resamples",
",",
"random_seed",
")",
"self",
".",
"__bootstraps",
"=",
"npsort",
"(",
"bootstraps",
")",
"# Added in v0.2.6.",
"# Raises a UserWarning if there are any infiinities in the bootstraps.",
"num_infinities",
"=",
"len",
"(",
"self",
".",
"__bootstraps",
"[",
"isinf",
"(",
"self",
".",
"__bootstraps",
")",
"]",
")",
"if",
"num_infinities",
">",
"0",
":",
"warn_msg",
"=",
"\"There are {} bootstrap(s) that are not defined. \"",
"\"This is likely due to smaple sample sizes. \"",
"\"The values in a bootstrap for a group will be more likely \"",
"\"to be all equal, with a resulting variance of zero. \"",
"\"The computation of Cohen's d and Hedges' g thus \"",
"\"involved a division by zero. \"",
"warnings",
".",
"warn",
"(",
"warn_msg",
".",
"format",
"(",
"num_infinities",
")",
",",
"category",
"=",
"UserWarning",
")",
"self",
".",
"__bias_correction",
"=",
"ci2g",
".",
"compute_meandiff_bias_correction",
"(",
"self",
".",
"__bootstraps",
",",
"self",
".",
"__difference",
")",
"# Compute BCa intervals.",
"bca_idx_low",
",",
"bca_idx_high",
"=",
"ci2g",
".",
"compute_interval_limits",
"(",
"self",
".",
"__bias_correction",
",",
"self",
".",
"__acceleration_value",
",",
"self",
".",
"__resamples",
",",
"ci",
")",
"self",
".",
"__bca_interval_idx",
"=",
"(",
"bca_idx_low",
",",
"bca_idx_high",
")",
"if",
"~",
"isnan",
"(",
"bca_idx_low",
")",
"and",
"~",
"isnan",
"(",
"bca_idx_high",
")",
":",
"self",
".",
"__bca_low",
"=",
"self",
".",
"__bootstraps",
"[",
"bca_idx_low",
"]",
"self",
".",
"__bca_high",
"=",
"self",
".",
"__bootstraps",
"[",
"bca_idx_high",
"]",
"err1",
"=",
"\"The $lim_type limit of the interval\"",
"err2",
"=",
"\"was in the $loc 10 values.\"",
"err3",
"=",
"\"The result should be considered unstable.\"",
"err_temp",
"=",
"Template",
"(",
"\" \"",
".",
"join",
"(",
"[",
"err1",
",",
"err2",
",",
"err3",
"]",
")",
")",
"if",
"bca_idx_low",
"<=",
"10",
":",
"warnings",
".",
"warn",
"(",
"err_temp",
".",
"substitute",
"(",
"lim_type",
"=",
"\"lower\"",
",",
"loc",
"=",
"\"bottom\"",
")",
",",
"stacklevel",
"=",
"1",
")",
"if",
"bca_idx_high",
">=",
"resamples",
"-",
"9",
":",
"warnings",
".",
"warn",
"(",
"err_temp",
".",
"substitute",
"(",
"lim_type",
"=",
"\"upper\"",
",",
"loc",
"=",
"\"top\"",
")",
",",
"stacklevel",
"=",
"1",
")",
"else",
":",
"err1",
"=",
"\"The $lim_type limit of the BCa interval cannot be computed.\"",
"err2",
"=",
"\"It is set to the effect size itself.\"",
"err3",
"=",
"\"All bootstrap values were likely all the same.\"",
"err_temp",
"=",
"Template",
"(",
"\" \"",
".",
"join",
"(",
"[",
"err1",
",",
"err2",
",",
"err3",
"]",
")",
")",
"if",
"isnan",
"(",
"bca_idx_low",
")",
":",
"self",
".",
"__bca_low",
"=",
"self",
".",
"__difference",
"warnings",
".",
"warn",
"(",
"err_temp",
".",
"substitute",
"(",
"lim_type",
"=",
"\"lower\"",
")",
",",
"stacklevel",
"=",
"0",
")",
"if",
"isnan",
"(",
"bca_idx_high",
")",
":",
"self",
".",
"__bca_high",
"=",
"self",
".",
"__difference",
"warnings",
".",
"warn",
"(",
"err_temp",
".",
"substitute",
"(",
"lim_type",
"=",
"\"upper\"",
")",
",",
"stacklevel",
"=",
"0",
")",
"# Compute percentile intervals.",
"pct_idx_low",
"=",
"int",
"(",
"(",
"self",
".",
"__alpha",
"/",
"2",
")",
"*",
"resamples",
")",
"pct_idx_high",
"=",
"int",
"(",
"(",
"1",
"-",
"(",
"self",
".",
"__alpha",
"/",
"2",
")",
")",
"*",
"resamples",
")",
"self",
".",
"__pct_interval_idx",
"=",
"(",
"pct_idx_low",
",",
"pct_idx_high",
")",
"self",
".",
"__pct_low",
"=",
"self",
".",
"__bootstraps",
"[",
"pct_idx_low",
"]",
"self",
".",
"__pct_high",
"=",
"self",
".",
"__bootstraps",
"[",
"pct_idx_high",
"]",
"# Perform statistical tests.",
"self",
".",
"__PermutationTest_result",
"=",
"PermutationTest",
"(",
"control",
",",
"test",
",",
"effect_size",
",",
"is_paired",
",",
"permutation_count",
")",
"if",
"is_paired",
"is",
"True",
":",
"# Wilcoxon, a non-parametric version of the paired T-test.",
"wilcoxon",
"=",
"spstats",
".",
"wilcoxon",
"(",
"control",
",",
"test",
")",
"self",
".",
"__pvalue_wilcoxon",
"=",
"wilcoxon",
".",
"pvalue",
"self",
".",
"__statistic_wilcoxon",
"=",
"wilcoxon",
".",
"statistic",
"# Introduced in v0.2.8, removed in v0.3.0 for performance issues.",
"# lqrt_result = lqrt.lqrtest_rel(control, test, ",
"# random_state=random_seed)",
"# self.__pvalue_paired_lqrt = lqrt_result.pvalue",
"# self.__statistic_paired_lqrt = lqrt_result.statistic",
"if",
"effect_size",
"!=",
"\"median_diff\"",
":",
"# Paired Student's t-test.",
"paired_t",
"=",
"spstats",
".",
"ttest_rel",
"(",
"control",
",",
"test",
",",
"nan_policy",
"=",
"'omit'",
")",
"self",
".",
"__pvalue_paired_students_t",
"=",
"paired_t",
".",
"pvalue",
"self",
".",
"__statistic_paired_students_t",
"=",
"paired_t",
".",
"statistic",
"standardized_es",
"=",
"es",
".",
"cohens_d",
"(",
"control",
",",
"test",
",",
"is_paired",
"=",
"True",
")",
"# self.__power = power.tt_solve_power(standardized_es,",
"# len(control),",
"# alpha=self.__alpha)",
"elif",
"effect_size",
"==",
"\"cliffs_delta\"",
":",
"# Let's go with Brunner-Munzel!",
"brunner_munzel",
"=",
"spstats",
".",
"brunnermunzel",
"(",
"control",
",",
"test",
",",
"nan_policy",
"=",
"'omit'",
")",
"self",
".",
"__pvalue_brunner_munzel",
"=",
"brunner_munzel",
".",
"pvalue",
"self",
".",
"__statistic_brunner_munzel",
"=",
"brunner_munzel",
".",
"statistic",
"elif",
"effect_size",
"==",
"\"median_diff\"",
":",
"# According to scipy's documentation of the function,",
"# \"The Kruskal-Wallis H-test tests the null hypothesis",
"# that the population median of all of the groups are equal.\"",
"kruskal",
"=",
"spstats",
".",
"kruskal",
"(",
"control",
",",
"test",
",",
"nan_policy",
"=",
"'omit'",
")",
"self",
".",
"__pvalue_kruskal",
"=",
"kruskal",
".",
"pvalue",
"self",
".",
"__statistic_kruskal",
"=",
"kruskal",
".",
"statistic",
"# self.__power = np.nan",
"else",
":",
"# for mean difference, Cohen's d, and Hedges' g.",
"# Welch's t-test, assumes normality of distributions,",
"# but does not assume equal variances.",
"welch",
"=",
"spstats",
".",
"ttest_ind",
"(",
"control",
",",
"test",
",",
"equal_var",
"=",
"False",
",",
"nan_policy",
"=",
"'omit'",
")",
"self",
".",
"__pvalue_welch",
"=",
"welch",
".",
"pvalue",
"self",
".",
"__statistic_welch",
"=",
"welch",
".",
"statistic",
"# Student's t-test, assumes normality of distributions,",
"# as well as assumption of equal variances.",
"students_t",
"=",
"spstats",
".",
"ttest_ind",
"(",
"control",
",",
"test",
",",
"equal_var",
"=",
"True",
",",
"nan_policy",
"=",
"'omit'",
")",
"self",
".",
"__pvalue_students_t",
"=",
"students_t",
".",
"pvalue",
"self",
".",
"__statistic_students_t",
"=",
"students_t",
".",
"statistic",
"# Mann-Whitney test: Non parametric,",
"# does not assume normality of distributions",
"try",
":",
"mann_whitney",
"=",
"spstats",
".",
"mannwhitneyu",
"(",
"control",
",",
"test",
",",
"alternative",
"=",
"'two-sided'",
")",
"self",
".",
"__pvalue_mann_whitney",
"=",
"mann_whitney",
".",
"pvalue",
"self",
".",
"__statistic_mann_whitney",
"=",
"mann_whitney",
".",
"statistic",
"except",
"ValueError",
":",
"# Occurs when the control and test are exactly identical",
"# in terms of rank (eg. all zeros.)",
"pass",
"# Introduced in v0.2.8, removed in v0.3.0 for performance issues.",
"# # Likelihood Q-Ratio test:",
"# lqrt_equal_var_result = lqrt.lqrtest_ind(control, test, ",
"# random_state=random_seed,",
"# equal_var=True)",
"# self.__pvalue_lqrt_equal_var = lqrt_equal_var_result.pvalue",
"# self.__statistic_lqrt_equal_var = lqrt_equal_var_result.statistic",
"# lqrt_unequal_var_result = lqrt.lqrtest_ind(control, test, ",
"# random_state=random_seed,",
"# equal_var=False)",
"# self.__pvalue_lqrt_unequal_var = lqrt_unequal_var_result.pvalue",
"# self.__statistic_lqrt_unequal_var = lqrt_unequal_var_result.statistic",
"standardized_es",
"=",
"es",
".",
"cohens_d",
"(",
"control",
",",
"test",
",",
"is_paired",
"=",
"False",
")"
] | https://github.com/ACCLAB/DABEST-python/blob/3ac87685a6c0859f731e9c9107bef8f32e39a61d/dabest/_classes.py#L557-L900 |
||
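The tokens above implement DABEST's effect-size bootstrap: the resamples are sorted once, then BCa and plain percentile interval limits are read off that sorted array by index. A minimal, hedged sketch of just the percentile step, assuming a plain array of bootstrap replicates (the helper below is illustrative, not part of DABEST's API):

    import numpy as np

    def percentile_interval(bootstraps, ci=95):
        # Sort once, then index, mirroring the pct_idx_low / pct_idx_high
        # computation tokenized in the record above.
        boots = np.sort(np.asarray(bootstraps))
        alpha = (100.0 - ci) / 100.0
        resamples = len(boots)
        idx_low = int((alpha / 2) * resamples)
        idx_high = int((1 - (alpha / 2)) * resamples)
        return boots[idx_low], boots[idx_high]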
home-assistant/core | 265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1 | homeassistant/components/shelly/entity.py | python | ShellyBlockEntity.should_poll | (self) | return False | If device should be polled. | If device should be polled. | [
"If",
"device",
"should",
"be",
"polled",
"."
] | def should_poll(self) -> bool:
"""If device should be polled."""
return False | [
"def",
"should_poll",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"False"
] | https://github.com/home-assistant/core/blob/265ebd17a3f17ed8dc1e9bdede03ac8e323f1ab1/homeassistant/components/shelly/entity.py#L292-L294 |
|
TengXiaoDai/DistributedCrawling | f5c2439e6ce68dd9b49bde084d76473ff9ed4963 | Lib/site-packages/pkg_resources/__init__.py | python | IResourceProvider.resource_isdir | (resource_name) | Is the named resource a directory? (like ``os.path.isdir()``) | Is the named resource a directory? (like ``os.path.isdir()``) | [
"Is",
"the",
"named",
"resource",
"a",
"directory?",
"(",
"like",
"os",
".",
"path",
".",
"isdir",
"()",
")"
] | def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)""" | [
"def",
"resource_isdir",
"(",
"resource_name",
")",
":"
] | https://github.com/TengXiaoDai/DistributedCrawling/blob/f5c2439e6ce68dd9b49bde084d76473ff9ed4963/Lib/site-packages/pkg_resources/__init__.py#L617-L618 |
||
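`resource_isdir` above is an abstract provider hook; in practice it is usually reached through the module-level wrapper. A hedged usage sketch (the package and resource names are hypothetical):

    import pkg_resources

    if pkg_resources.resource_isdir("mypackage", "data"):
        print(pkg_resources.resource_listdir("mypackage", "data"))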
rstacruz/sparkup | d400a570bf64b0c216aa7c8e1795820b911a7404 | sparkup.py | python | Element._populate | (self) | Expands with default items.
This is called when the [[populate]] flag is turned on. | Expands with default items. | [
"Expands",
"with",
"default",
"items",
"."
] | def _populate(self):
"""Expands with default items.
This is called when the [[populate]] flag is turned on.
"""
if self.name == 'ul':
elements = [Element(name='li', parent=self, parser=self.parser)]
elif self.name == 'dl':
elements = [
Element(name='dt', parent=self, parser=self.parser),
Element(name='dd', parent=self, parser=self.parser)]
elif self.name == 'table':
tr = Element(name='tr', parent=self, parser=self.parser)
td = Element(name='td', parent=tr, parser=self.parser)
tr.children.append(td)
elements = [tr]
else:
elements = []
for el in elements:
self.children.append(el) | [
"def",
"_populate",
"(",
"self",
")",
":",
"if",
"self",
".",
"name",
"==",
"'ul'",
":",
"elements",
"=",
"[",
"Element",
"(",
"name",
"=",
"'li'",
",",
"parent",
"=",
"self",
",",
"parser",
"=",
"self",
".",
"parser",
")",
"]",
"elif",
"self",
".",
"name",
"==",
"'dl'",
":",
"elements",
"=",
"[",
"Element",
"(",
"name",
"=",
"'dt'",
",",
"parent",
"=",
"self",
",",
"parser",
"=",
"self",
".",
"parser",
")",
",",
"Element",
"(",
"name",
"=",
"'dd'",
",",
"parent",
"=",
"self",
",",
"parser",
"=",
"self",
".",
"parser",
")",
"]",
"elif",
"self",
".",
"name",
"==",
"'table'",
":",
"tr",
"=",
"Element",
"(",
"name",
"=",
"'tr'",
",",
"parent",
"=",
"self",
",",
"parser",
"=",
"self",
".",
"parser",
")",
"td",
"=",
"Element",
"(",
"name",
"=",
"'td'",
",",
"parent",
"=",
"tr",
",",
"parser",
"=",
"self",
".",
"parser",
")",
"tr",
".",
"children",
".",
"append",
"(",
"td",
")",
"elements",
"=",
"[",
"tr",
"]",
"else",
":",
"elements",
"=",
"[",
"]",
"for",
"el",
"in",
"elements",
":",
"self",
".",
"children",
".",
"append",
"(",
"el",
")"
] | https://github.com/rstacruz/sparkup/blob/d400a570bf64b0c216aa7c8e1795820b911a7404/sparkup.py#L784-L808 |
||
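The branching in `_populate` above amounts to a small table of default children per tag name. An equivalent data-driven sketch (illustrative only; sparkup itself builds `Element` objects rather than strings):

    POPULATE_RULES = {
        "ul": ["li"],
        "dl": ["dt", "dd"],
        "table": [("tr", ["td"])],  # a tr that itself contains a td
    }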
MultiAgentLearning/playground | 8c06a21da5758b570b708e9dd3337e1fa1e67e71 | pommerman/agents/simple_agent.py | python | SimpleAgent._near_enemy | (cls, my_position, items, dist, prev, enemies, radius) | return cls._get_direction_towards_position(my_position,
nearest_enemy_position, prev) | [] | def _near_enemy(cls, my_position, items, dist, prev, enemies, radius):
nearest_enemy_position = cls._nearest_position(dist, enemies, items,
radius)
return cls._get_direction_towards_position(my_position,
nearest_enemy_position, prev) | [
"def",
"_near_enemy",
"(",
"cls",
",",
"my_position",
",",
"items",
",",
"dist",
",",
"prev",
",",
"enemies",
",",
"radius",
")",
":",
"nearest_enemy_position",
"=",
"cls",
".",
"_nearest_position",
"(",
"dist",
",",
"enemies",
",",
"items",
",",
"radius",
")",
"return",
"cls",
".",
"_get_direction_towards_position",
"(",
"my_position",
",",
"nearest_enemy_position",
",",
"prev",
")"
] | https://github.com/MultiAgentLearning/playground/blob/8c06a21da5758b570b708e9dd3337e1fa1e67e71/pommerman/agents/simple_agent.py#L386-L390 |
|||
mar10/wsgidav | b7ce6ce47c7f42b8f7eed0258b89fea4c1951233 | wsgidav/dc/base_dc.py | python | BaseDomainController.supports_http_digest_auth | (self) | | Signal if this DC instance supports the HTTP digest authentication scheme.
If true, `HTTPAuthenticator` will call `dc.digest_auth_user()`,
so this method must be implemented as well.
Returns:
bool | Signal if this DC instance supports the HTTP digest authentication scheme. | [
"Signal",
"if",
"this",
"DC",
"instance",
"supports",
"the",
"HTTP",
"digest",
"authentication",
"theme",
"."
] | def supports_http_digest_auth(self):
"""Signal if this DC instance supports the HTTP digest authentication theme.
If true, `HTTPAuthenticator` will call `dc.digest_auth_user()`,
so this method must be implemented as well.
Returns:
bool
"""
raise NotImplementedError | [
"def",
"supports_http_digest_auth",
"(",
"self",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/mar10/wsgidav/blob/b7ce6ce47c7f42b8f7eed0258b89fea4c1951233/wsgidav/dc/base_dc.py#L169-L178 |
||
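A domain controller that opts out of digest authentication only needs to return False from the hook above; `HTTPAuthenticator` will then never call `digest_auth_user()`. Minimal sketch (the other abstract `BaseDomainController` methods are omitted here):

    from wsgidav.dc.base_dc import BaseDomainController

    class BasicOnlyDomainController(BaseDomainController):
        def supports_http_digest_auth(self):
            # Force HTTP Basic authentication only.
            return False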
CedricGuillemet/Imogen | ee417b42747ed5b46cb11b02ef0c3630000085b3 | bin/Lib/inspect.py | python | _main | () | Logic for inspecting an object given at command line | Logic for inspecting an object given at command line | [
"Logic",
"for",
"inspecting",
"an",
"object",
"given",
"at",
"command",
"line"
] | def _main():
""" Logic for inspecting an object given at command line """
import argparse
import importlib
parser = argparse.ArgumentParser()
parser.add_argument(
'object',
help="The object to be analysed. "
"It supports the 'module:qualname' syntax")
parser.add_argument(
'-d', '--details', action='store_true',
help='Display info about the module rather than its source code')
args = parser.parse_args()
target = args.object
mod_name, has_attrs, attrs = target.partition(":")
try:
obj = module = importlib.import_module(mod_name)
except Exception as exc:
msg = "Failed to import {} ({}: {})".format(mod_name,
type(exc).__name__,
exc)
print(msg, file=sys.stderr)
exit(2)
if has_attrs:
parts = attrs.split(".")
obj = module
for part in parts:
obj = getattr(obj, part)
if module.__name__ in sys.builtin_module_names:
print("Can't get info for builtin modules.", file=sys.stderr)
exit(1)
if args.details:
print('Target: {}'.format(target))
print('Origin: {}'.format(getsourcefile(module)))
print('Cached: {}'.format(module.__cached__))
if obj is module:
print('Loader: {}'.format(repr(module.__loader__)))
if hasattr(module, '__path__'):
print('Submodule search path: {}'.format(module.__path__))
else:
try:
__, lineno = findsource(obj)
except Exception:
pass
else:
print('Line: {}'.format(lineno))
print('\n')
else:
print(getsource(obj)) | [
"def",
"_main",
"(",
")",
":",
"import",
"argparse",
"import",
"importlib",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'object'",
",",
"help",
"=",
"\"The object to be analysed. \"",
"\"It supports the 'module:qualname' syntax\"",
")",
"parser",
".",
"add_argument",
"(",
"'-d'",
",",
"'--details'",
",",
"action",
"=",
"'store_true'",
",",
"help",
"=",
"'Display info about the module rather than its source code'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"target",
"=",
"args",
".",
"object",
"mod_name",
",",
"has_attrs",
",",
"attrs",
"=",
"target",
".",
"partition",
"(",
"\":\"",
")",
"try",
":",
"obj",
"=",
"module",
"=",
"importlib",
".",
"import_module",
"(",
"mod_name",
")",
"except",
"Exception",
"as",
"exc",
":",
"msg",
"=",
"\"Failed to import {} ({}: {})\"",
".",
"format",
"(",
"mod_name",
",",
"type",
"(",
"exc",
")",
".",
"__name__",
",",
"exc",
")",
"print",
"(",
"msg",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
"2",
")",
"if",
"has_attrs",
":",
"parts",
"=",
"attrs",
".",
"split",
"(",
"\".\"",
")",
"obj",
"=",
"module",
"for",
"part",
"in",
"parts",
":",
"obj",
"=",
"getattr",
"(",
"obj",
",",
"part",
")",
"if",
"module",
".",
"__name__",
"in",
"sys",
".",
"builtin_module_names",
":",
"print",
"(",
"\"Can't get info for builtin modules.\"",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"exit",
"(",
"1",
")",
"if",
"args",
".",
"details",
":",
"print",
"(",
"'Target: {}'",
".",
"format",
"(",
"target",
")",
")",
"print",
"(",
"'Origin: {}'",
".",
"format",
"(",
"getsourcefile",
"(",
"module",
")",
")",
")",
"print",
"(",
"'Cached: {}'",
".",
"format",
"(",
"module",
".",
"__cached__",
")",
")",
"if",
"obj",
"is",
"module",
":",
"print",
"(",
"'Loader: {}'",
".",
"format",
"(",
"repr",
"(",
"module",
".",
"__loader__",
")",
")",
")",
"if",
"hasattr",
"(",
"module",
",",
"'__path__'",
")",
":",
"print",
"(",
"'Submodule search path: {}'",
".",
"format",
"(",
"module",
".",
"__path__",
")",
")",
"else",
":",
"try",
":",
"__",
",",
"lineno",
"=",
"findsource",
"(",
"obj",
")",
"except",
"Exception",
":",
"pass",
"else",
":",
"print",
"(",
"'Line: {}'",
".",
"format",
"(",
"lineno",
")",
")",
"print",
"(",
"'\\n'",
")",
"else",
":",
"print",
"(",
"getsource",
"(",
"obj",
")",
")"
] | https://github.com/CedricGuillemet/Imogen/blob/ee417b42747ed5b46cb11b02ef0c3630000085b3/bin/Lib/inspect.py#L3078-L3133 |
||
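The argparse setup above gives `inspect` a small command-line interface; invoked as a module it prints either the source or the metadata of the target, using the 'module:qualname' syntax the help string describes:

    # python -m inspect os.path:join        -> prints the source of join()
    # python -m inspect --details os.path   -> prints Target/Origin/Cached info instead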
KhronosGroup/OpenXR-SDK-Source | 76756e2e7849b15466d29bee7d80cada92865550 | external/python/jinja2/environment.py | python | Environment._compile | (self, source, filename) | return compile(source, filename, 'exec') | Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5 | Internal hook that can be overridden to hook a different compile
method in. | [
"Internal",
"hook",
"that",
"can",
"be",
"overridden",
"to",
"hook",
"a",
"different",
"compile",
"method",
"in",
"."
] | def _compile(self, source, filename):
"""Internal hook that can be overridden to hook a different compile
method in.
.. versionadded:: 2.5
"""
return compile(source, filename, 'exec') | [
"def",
"_compile",
"(",
"self",
",",
"source",
",",
"filename",
")",
":",
"return",
"compile",
"(",
"source",
",",
"filename",
",",
"'exec'",
")"
] | https://github.com/KhronosGroup/OpenXR-SDK-Source/blob/76756e2e7849b15466d29bee7d80cada92865550/external/python/jinja2/environment.py#L545-L551 |
|
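Because `_compile` is an explicit override hook, instrumenting template compilation is a one-method subclass. A hedged sketch:

    from jinja2 import Environment

    class LoggingEnvironment(Environment):
        def _compile(self, source, filename):
            # Inspect (or rewrite) the generated Python source before
            # delegating to the default compile() call shown above.
            print("compiling template:", filename)
            return super()._compile(source, filename)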
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/library/openshift_cert_expiry.py | python | FakeOpenSSLCertificate.get_subject | (self) | return self.subject | Subjects must implement get_components() and return dicts or
tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject':
Subject: Subject: O=system:nodes, CN=system:node:m01.example.com
might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')] | Subjects must implement get_components() and return dicts or
tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject': | [
"Subjects",
"must",
"implement",
"get_components",
"()",
"and",
"return",
"dicts",
"or",
"tuples",
".",
"An",
"openssl",
"x509",
"-",
"in",
"CERT",
".",
"cert",
"-",
"text",
"with",
"Subject",
":"
] | def get_subject(self):
"""Subjects must implement get_components() and return dicts or
tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject':
Subject: Subject: O=system:nodes, CN=system:node:m01.example.com
might return: [('O=system', 'nodes'), ('CN=system', 'node:m01.example.com')]
"""
return self.subject | [
"def",
"get_subject",
"(",
"self",
")",
":",
"return",
"self",
".",
"subject"
] | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_utils/library/openshift_cert_expiry.py#L163-L171 |
|
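Consuming the subject works the same way as with pyOpenSSL certificates, via `get_components()`. Usage sketch (the constructor argument is an assumption; the record only shows `get_subject`):

    cert = FakeOpenSSLCertificate(cert_string)  # cert_string: PEM text, assumed
    for key, value in cert.get_subject().get_components():
        print(key, value)   # e.g. ('O=system', 'nodes')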
dump247/aws-mock-metadata | ec85bc8c6f41afa8fa624898d6ba1ee5315ebcc2 | metadata/otp.py | python | Totp.generate | (self, at=None) | return code % 10 ** self.digits | Generate a new OTP code.
Parameters:
at (datetime): timestamp to generate the code for or None to use current time
Returns:
(int): generated code | Generate a new OTP code. | [
"Generate",
"a",
"new",
"OTP",
"code",
"."
] | def generate(self, at=None):
"""
Generate a new OTP code.
Parameters:
at (datetime): timestamp to generate the code for or None to use current time
Returns:
(int): generated code
"""
timecode = self.__timecode(at or datetime.now())
hmac_hash = hmac.new(self.secret, timecode, self.digest).digest()
offset = ord(hmac_hash[19]) & 0xf
code = ((ord(hmac_hash[offset]) & 0x7f) << 24 |
(ord(hmac_hash[offset + 1]) & 0xff) << 16 |
(ord(hmac_hash[offset + 2]) & 0xff) << 8 |
(ord(hmac_hash[offset + 3]) & 0xff))
return code % 10 ** self.digits | [
"def",
"generate",
"(",
"self",
",",
"at",
"=",
"None",
")",
":",
"timecode",
"=",
"self",
".",
"__timecode",
"(",
"at",
"or",
"datetime",
".",
"now",
"(",
")",
")",
"hmac_hash",
"=",
"hmac",
".",
"new",
"(",
"self",
".",
"secret",
",",
"timecode",
",",
"self",
".",
"digest",
")",
".",
"digest",
"(",
")",
"offset",
"=",
"ord",
"(",
"hmac_hash",
"[",
"19",
"]",
")",
"&",
"0xf",
"code",
"=",
"(",
"(",
"ord",
"(",
"hmac_hash",
"[",
"offset",
"]",
")",
"&",
"0x7f",
")",
"<<",
"24",
"|",
"(",
"ord",
"(",
"hmac_hash",
"[",
"offset",
"+",
"1",
"]",
")",
"&",
"0xff",
")",
"<<",
"16",
"|",
"(",
"ord",
"(",
"hmac_hash",
"[",
"offset",
"+",
"2",
"]",
")",
"&",
"0xff",
")",
"<<",
"8",
"|",
"(",
"ord",
"(",
"hmac_hash",
"[",
"offset",
"+",
"3",
"]",
")",
"&",
"0xff",
")",
")",
"return",
"code",
"%",
"10",
"**",
"self",
".",
"digits"
] | https://github.com/dump247/aws-mock-metadata/blob/ec85bc8c6f41afa8fa624898d6ba1ee5315ebcc2/metadata/otp.py#L26-L45 |
|
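Usage sketch for the generator above. The constructor signature is an assumption (the record only shows `generate`), and the secret is the well-known RFC 4226 test key:

    from datetime import datetime

    totp = Totp(secret=b"12345678901234567890")    # constructor args assumed
    print(totp.generate())                         # code for the current time
    print(totp.generate(at=datetime(2020, 1, 1)))  # code for a fixed timestamp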
certsocietegenerale/fame | 6bb758063b1ac5de6792fd3316a4635d1ffecfd1 | fame/core/module.py | python | ProcessingModule.add_probable_name | (self, probable_name) | Add a probable name to the analysis.
Args:
probable_name (string): probable name of the malware. | Add a probable name to the analysis. | [
"Add",
"a",
"probable",
"name",
"to",
"the",
"analysis",
"."
] | def add_probable_name(self, probable_name):
"""Add a probable name to the analysis.
Args:
probable_name (string): probable name of the malware.
"""
self._analysis.add_probable_name(probable_name) | [
"def",
"add_probable_name",
"(",
"self",
",",
"probable_name",
")",
":",
"self",
".",
"_analysis",
".",
"add_probable_name",
"(",
"probable_name",
")"
] | https://github.com/certsocietegenerale/fame/blob/6bb758063b1ac5de6792fd3316a4635d1ffecfd1/fame/core/module.py#L374-L380 |
||
openstack/keystone | 771c943ad2116193e7bb118c74993c829d93bd71 | keystone/receipt/receipt_formatters.py | python | ReceiptFormatter.crypto | (self) | return fernet.MultiFernet(fernet_instances) | Return a cryptography instance.
You can extend this class with a custom crypto @property to provide
your own receipt encoding / decoding. For example, using a different
cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
security requirements.
This @property just needs to return an object that implements
``encrypt(plaintext)`` and ``decrypt(ciphertext)``. | Return a cryptography instance. | [
"Return",
"a",
"cryptography",
"instance",
"."
] | def crypto(self):
"""Return a cryptography instance.
You can extend this class with a custom crypto @property to provide
your own receipt encoding / decoding. For example, using a different
cryptography library (e.g. ``python-keyczar``) or to meet arbitrary
security requirements.
This @property just needs to return an object that implements
``encrypt(plaintext)`` and ``decrypt(ciphertext)``.
"""
fernet_utils = utils.FernetUtils(
CONF.fernet_receipts.key_repository,
CONF.fernet_receipts.max_active_keys,
'fernet_receipts'
)
keys = fernet_utils.load_keys()
if not keys:
raise exception.KeysNotFound()
fernet_instances = [fernet.Fernet(key) for key in keys]
return fernet.MultiFernet(fernet_instances) | [
"def",
"crypto",
"(",
"self",
")",
":",
"fernet_utils",
"=",
"utils",
".",
"FernetUtils",
"(",
"CONF",
".",
"fernet_receipts",
".",
"key_repository",
",",
"CONF",
".",
"fernet_receipts",
".",
"max_active_keys",
",",
"'fernet_receipts'",
")",
"keys",
"=",
"fernet_utils",
".",
"load_keys",
"(",
")",
"if",
"not",
"keys",
":",
"raise",
"exception",
".",
"KeysNotFound",
"(",
")",
"fernet_instances",
"=",
"[",
"fernet",
".",
"Fernet",
"(",
"key",
")",
"for",
"key",
"in",
"keys",
"]",
"return",
"fernet",
".",
"MultiFernet",
"(",
"fernet_instances",
")"
] | https://github.com/openstack/keystone/blob/771c943ad2116193e7bb118c74993c829d93bd71/keystone/receipt/receipt_formatters.py#L46-L69 |
|
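The docstring above spells out the extension contract: any object exposing `encrypt(plaintext)` and `decrypt(ciphertext)` can replace the Fernet default. Hedged sketch (the adapter class is hypothetical):

    from keystone.receipt.receipt_formatters import ReceiptFormatter

    class MyReceiptFormatter(ReceiptFormatter):
        @property
        def crypto(self):
            # Hypothetical adapter implementing encrypt()/decrypt().
            return MyCipherAdapter()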
openedx/edx-platform | 68dd185a0ab45862a2a61e0f803d7e03d2be71b5 | common/djangoapps/pipeline_mako/__init__.py | python | render_require_js_path_overrides | (path_overrides) | return html.format(overrides=',\n'.join(new_paths)) | Render JavaScript to override default RequireJS paths.
The Django pipeline appends a hash to JavaScript files,
so if the JS asset isn't included in the bundle for the page,
we need to tell RequireJS where to look.
For example:
"js/vendor/jquery.js" --> "js/vendor/jquery.abcd1234"
To achieve this we will add overridden paths to the RequireJS config at runtime,
so that any reference to 'jquery' in a JavaScript module
will cause RequireJS to load '/static/js/vendor/jquery.abcd1234.js'
If running in DEBUG mode (as in devstack), the resolved JavaScript URLs
won't contain hashes, so the new paths will match the original paths.
Arguments:
path_overrides (dict): Mapping of RequireJS module names to
filesystem paths.
Returns:
unicode: The HTML of the <script> tag with the path overrides. | Render JavaScript to override default RequireJS paths. | [
"Render",
"JavaScript",
"to",
"override",
"default",
"RequireJS",
"paths",
"."
] | def render_require_js_path_overrides(path_overrides):
"""Render JavaScript to override default RequireJS paths.
The Django pipeline appends a hash to JavaScript files,
so if the JS asset isn't included in the bundle for the page,
we need to tell RequireJS where to look.
For example:
"js/vendor/jquery.js" --> "js/vendor/jquery.abcd1234"
To achieve this we will add overridden paths to the RequireJS config at runtime,
so that any reference to 'jquery' in a JavaScript module
will cause RequireJS to load '/static/js/vendor/jquery.abcd1234.js'
If running in DEBUG mode (as in devstack), the resolved JavaScript URLs
won't contain hashes, so the new paths will match the original paths.
Arguments:
path_overrides (dict): Mapping of RequireJS module names to
filesystem paths.
Returns:
unicode: The HTML of the <script> tag with the path overrides.
"""
# Render the <script> tag that overrides the paths
# Note: We don't use a Mako template to render this because Mako apparently
# acquires a lock when loading templates, which can lead to a deadlock if
# this function is called from within another template.
# The rendered <script> tag with overrides should be included *after*
# the application's RequireJS config, which defines a `require` object.
html = '''<script type="text/javascript">
(function (require) {{
require.config({{
paths: {{
{overrides}
}}
}});
}}).call(this, require || RequireJS.require);
</script>'''
new_paths = []
for module in path_overrides:
# Calculate the full URL, including any hashes added to the filename by the pipeline.
# This will also include the base static URL (for example, "/static/") and the
# ".js" extension.
actual_url = staticfiles_storage.url(path_overrides[module])
# RequireJS assumes that every file it tries to load has a ".js" extension, so
# we need to remove ".js" from the module path.
# RequireJS also already has a base URL set to the base static URL, so we can remove that.
path = actual_url.replace('.js', '').replace(django_settings.STATIC_URL, '')
new_paths.append(f"'{module}': '{path}'")
return html.format(overrides=',\n'.join(new_paths)) | [
"def",
"render_require_js_path_overrides",
"(",
"path_overrides",
")",
":",
"# Render the <script> tag that overrides the paths",
"# Note: We don't use a Mako template to render this because Mako apparently",
"# acquires a lock when loading templates, which can lead to a deadlock if",
"# this function is called from within another template.",
"# The rendered <script> tag with overrides should be included *after*",
"# the application's RequireJS config, which defines a `require` object.",
"html",
"=",
"'''<script type=\"text/javascript\">\n (function (require) {{\n require.config({{\n paths: {{\n {overrides}\n }}\n }});\n }}).call(this, require || RequireJS.require);\n </script>'''",
"new_paths",
"=",
"[",
"]",
"for",
"module",
"in",
"path_overrides",
":",
"# Calculate the full URL, including any hashes added to the filename by the pipeline.",
"# This will also include the base static URL (for example, \"/static/\") and the",
"# \".js\" extension.",
"actual_url",
"=",
"staticfiles_storage",
".",
"url",
"(",
"path_overrides",
"[",
"module",
"]",
")",
"# RequireJS assumes that every file it tries to load has a \".js\" extension, so",
"# we need to remove \".js\" from the module path.",
"# RequireJS also already has a base URL set to the base static URL, so we can remove that.",
"path",
"=",
"actual_url",
".",
"replace",
"(",
"'.js'",
",",
"''",
")",
".",
"replace",
"(",
"django_settings",
".",
"STATIC_URL",
",",
"''",
")",
"new_paths",
".",
"append",
"(",
"f\"'{module}': '{path}'\"",
")",
"return",
"html",
".",
"format",
"(",
"overrides",
"=",
"',\\n'",
".",
"join",
"(",
"new_paths",
")",
")"
] | https://github.com/openedx/edx-platform/blob/68dd185a0ab45862a2a61e0f803d7e03d2be71b5/common/djangoapps/pipeline_mako/__init__.py#L88-L145 |
|
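Calling the helper above with a module-to-path mapping yields the override `<script>` tag. Sketch (requires Django staticfiles to be configured; the mapping is illustrative):

    html = render_require_js_path_overrides({"jquery": "js/vendor/jquery.js"})
    # html contains: require.config({ paths: { 'jquery': 'js/vendor/jquery.<hash>' } })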
IronLanguages/main | a949455434b1fda8c783289e897e78a9a0caabb5 | Hosts/MerlinWeb/examples/Global.py | python | Application_Error | (app, e) | Code that runs when an unhandled error occurs | Code that runs when an unhandled error occurs | [
"Code",
"that",
"runs",
"when",
"an",
"unhandled",
"error",
"occurs"
] | def Application_Error(app, e):
' Code that runs when an unhandled error occurs'
pass | [
"def",
"Application_Error",
"(",
"app",
",",
"e",
")",
":",
"pass"
] | https://github.com/IronLanguages/main/blob/a949455434b1fda8c783289e897e78a9a0caabb5/Hosts/MerlinWeb/examples/Global.py#L19-L21 |
||
travitch/whole-program-llvm | c66344231720b4a0780c5bed4219cb5314bbcad5 | wllvm/compilers.py | python | ClangBuilder.getBitcodeGenerationFlags | (self) | return [] | [] | def getBitcodeGenerationFlags(self):
# iam: If the environment variable LLVM_BITCODE_GENERATION_FLAGS is set we will add them to the
# bitcode generation step
bitcodeFLAGS = os.getenv('LLVM_BITCODE_GENERATION_FLAGS')
if bitcodeFLAGS:
return bitcodeFLAGS.split()
return [] | [
"def",
"getBitcodeGenerationFlags",
"(",
"self",
")",
":",
"# iam: If the environment variable LLVM_BITCODE_GENERATION_FLAGS is set we will add them to the",
"# bitcode generation step",
"bitcodeFLAGS",
"=",
"os",
".",
"getenv",
"(",
"'LLVM_BITCODE_GENERATION_FLAGS'",
")",
"if",
"bitcodeFLAGS",
":",
"return",
"bitcodeFLAGS",
".",
"split",
"(",
")",
"return",
"[",
"]"
] | https://github.com/travitch/whole-program-llvm/blob/c66344231720b4a0780c5bed4219cb5314bbcad5/wllvm/compilers.py#L205-L211 |
|||
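The getter above just splits an environment variable, so extra bitcode flags are injected from the caller's environment. Sketch (the flag values here are assumptions):

    import os

    # Set before invoking the wllvm compiler wrapper:
    os.environ["LLVM_BITCODE_GENERATION_FLAGS"] = "-flto -O1"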
sqlalchemy/sqlalchemy | eb716884a4abcabae84a6aaba105568e925b7d27 | lib/sqlalchemy/engine/events.py | python | DialectEvents.do_setinputsizes | (
self, inputsizes, cursor, statement, parameters, context
) | Receive the setinputsizes dictionary for possible modification.
This event is emitted in the case where the dialect makes use of the
DBAPI ``cursor.setinputsizes()`` method which passes information about
parameter binding for a particular statement. The given
``inputsizes`` dictionary will contain :class:`.BindParameter` objects
as keys, linked to DBAPI-specific type objects as values; for
parameters that are not bound, they are added to the dictionary with
``None`` as the value, which means the parameter will not be included
in the ultimate setinputsizes call. The event may be used to inspect
and/or log the datatypes that are being bound, as well as to modify the
dictionary in place. Parameters can be added, modified, or removed
from this dictionary. Callers will typically want to inspect the
:attr:`.BindParameter.type` attribute of the given bind objects in
order to make decisions about the DBAPI object.
After the event, the ``inputsizes`` dictionary is converted into
an appropriate datastructure to be passed to ``cursor.setinputsizes``;
either a list for a positional bound parameter execution style,
or a dictionary of string parameter keys to DBAPI type objects for
a named bound parameter execution style.
The setinputsizes hook overall is only used for dialects which include
the flag ``use_setinputsizes=True``. Dialects which use this
include cx_Oracle, pg8000, asyncpg, and pyodbc dialects.
.. note::
For use with pyodbc, the ``use_setinputsizes`` flag
must be passed to the dialect, e.g.::
create_engine("mssql+pyodbc://...", use_setinputsizes=True)
.. seealso::
:ref:`mssql_pyodbc_setinputsizes`
.. versionadded:: 1.2.9
.. seealso::
:ref:`cx_oracle_setinputsizes` | Receive the setinputsizes dictionary for possible modification. | [
"Receive",
"the",
"setinputsizes",
"dictionary",
"for",
"possible",
"modification",
"."
] | def do_setinputsizes(
self, inputsizes, cursor, statement, parameters, context
):
"""Receive the setinputsizes dictionary for possible modification.
This event is emitted in the case where the dialect makes use of the
DBAPI ``cursor.setinputsizes()`` method which passes information about
parameter binding for a particular statement. The given
``inputsizes`` dictionary will contain :class:`.BindParameter` objects
as keys, linked to DBAPI-specific type objects as values; for
parameters that are not bound, they are added to the dictionary with
``None`` as the value, which means the parameter will not be included
in the ultimate setinputsizes call. The event may be used to inspect
and/or log the datatypes that are being bound, as well as to modify the
dictionary in place. Parameters can be added, modified, or removed
from this dictionary. Callers will typically want to inspect the
:attr:`.BindParameter.type` attribute of the given bind objects in
order to make decisions about the DBAPI object.
After the event, the ``inputsizes`` dictionary is converted into
an appropriate datastructure to be passed to ``cursor.setinputsizes``;
either a list for a positional bound parameter execution style,
or a dictionary of string parameter keys to DBAPI type objects for
a named bound parameter execution style.
The setinputsizes hook overall is only used for dialects which include
the flag ``use_setinputsizes=True``. Dialects which use this
include cx_Oracle, pg8000, asyncpg, and pyodbc dialects.
.. note::
For use with pyodbc, the ``use_setinputsizes`` flag
must be passed to the dialect, e.g.::
create_engine("mssql+pyodbc://...", use_setinputsizes=True)
.. seealso::
:ref:`mssql_pyodbc_setinputsizes`
.. versionadded:: 1.2.9
.. seealso::
:ref:`cx_oracle_setinputsizes`
"""
pass | [
"def",
"do_setinputsizes",
"(",
"self",
",",
"inputsizes",
",",
"cursor",
",",
"statement",
",",
"parameters",
",",
"context",
")",
":",
"pass"
] | https://github.com/sqlalchemy/sqlalchemy/blob/eb716884a4abcabae84a6aaba105568e925b7d27/lib/sqlalchemy/engine/events.py#L773-L820 |
||
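Hooking the event from user code mirrors the docstring: register a listener on an engine created with `use_setinputsizes=True`. Sketch, reusing the docstring's own pyodbc URL placeholder:

    from sqlalchemy import create_engine, event

    engine = create_engine("mssql+pyodbc://...", use_setinputsizes=True)

    @event.listens_for(engine, "do_setinputsizes")
    def log_inputsizes(inputsizes, cursor, statement, parameters, context):
        # Inspect the BindParameter -> DBAPI type mapping in place.
        for bindparam, dbapitype in inputsizes.items():
            print(bindparam.key, dbapitype)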
geopandas/geopandas | 8e7133aef9e6c0d2465e07e92d954e95dedd3881 | geopandas/base.py | python | GeoPandasBase.simplify | (self, *args, **kwargs) | return _delegate_geo_method("simplify", self, *args, **kwargs) | Returns a ``GeoSeries`` containing a simplified representation of
each geometry.
The algorithm (Douglas-Peucker) recursively splits the original line
into smaller parts and connects these parts’ endpoints
by a straight line. Then, it removes all points whose distance
to the straight line is smaller than `tolerance`. It does not
move any points and it always preserves endpoints of
the original line or polygon.
See http://shapely.readthedocs.io/en/latest/manual.html#object.simplify
for details
Parameters
----------
tolerance : float
All parts of a simplified geometry will be no more than
`tolerance` distance from the original. It has the same units
as the coordinate reference system of the GeoSeries.
For example, using `tolerance=100` in a projected CRS with meters
as units means a distance of 100 meters in reality.
preserve_topology: bool (default True)
False uses a quicker algorithm, but may produce self-intersecting
or otherwise invalid geometries.
Notes
-----
Invalid geometric objects may result from simplification that does not
preserve topology and simplification may be sensitive to the order of
coordinates: two geometries differing only in order of coordinates may be
simplified differently.
Examples
--------
>>> from shapely.geometry import Point, LineString
>>> s = geopandas.GeoSeries(
... [Point(0, 0).buffer(1), LineString([(0, 0), (1, 10), (0, 20)])]
... )
>>> s
0 POLYGON ((1.00000 0.00000, 0.99518 -0.09802, 0...
1 LINESTRING (0.00000 0.00000, 1.00000 10.00000,...
dtype: geometry
>>> s.simplify(1)
0 POLYGON ((1.00000 0.00000, 0.00000 -1.00000, -...
1 LINESTRING (0.00000 0.00000, 0.00000 20.00000)
dtype: geometry | Returns a ``GeoSeries`` containing a simplified representation of
each geometry. | [
"Returns",
"a",
"GeoSeries",
"containing",
"a",
"simplified",
"representation",
"of",
"each",
"geometry",
"."
] | def simplify(self, *args, **kwargs):
"""Returns a ``GeoSeries`` containing a simplified representation of
each geometry.
The algorithm (Douglas-Peucker) recursively splits the original line
into smaller parts and connects these parts’ endpoints
by a straight line. Then, it removes all points whose distance
to the straight line is smaller than `tolerance`. It does not
move any points and it always preserves endpoints of
the original line or polygon.
See http://shapely.readthedocs.io/en/latest/manual.html#object.simplify
for details
Parameters
----------
tolerance : float
All parts of a simplified geometry will be no more than
`tolerance` distance from the original. It has the same units
as the coordinate reference system of the GeoSeries.
For example, using `tolerance=100` in a projected CRS with meters
as units means a distance of 100 meters in reality.
preserve_topology: bool (default True)
False uses a quicker algorithm, but may produce self-intersecting
or otherwise invalid geometries.
Notes
-----
Invalid geometric objects may result from simplification that does not
preserve topology and simplification may be sensitive to the order of
coordinates: two geometries differing only in order of coordinates may be
simplified differently.
Examples
--------
>>> from shapely.geometry import Point, LineString
>>> s = geopandas.GeoSeries(
... [Point(0, 0).buffer(1), LineString([(0, 0), (1, 10), (0, 20)])]
... )
>>> s
0 POLYGON ((1.00000 0.00000, 0.99518 -0.09802, 0...
1 LINESTRING (0.00000 0.00000, 1.00000 10.00000,...
dtype: geometry
>>> s.simplify(1)
0 POLYGON ((1.00000 0.00000, 0.00000 -1.00000, -...
1 LINESTRING (0.00000 0.00000, 0.00000 20.00000)
dtype: geometry
"""
return _delegate_geo_method("simplify", self, *args, **kwargs) | [
"def",
"simplify",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_delegate_geo_method",
"(",
"\"simplify\"",
",",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/geopandas/geopandas/blob/8e7133aef9e6c0d2465e07e92d954e95dedd3881/geopandas/base.py#L2727-L2775 |
|
bendmorris/static-python | 2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473 | Lib/numbers.py | python | Complex.__radd__ | (self, other) | other + self | other + self | [
"other",
"+",
"self"
] | def __radd__(self, other):
"""other + self"""
raise NotImplementedError | [
"def",
"__radd__",
"(",
"self",
",",
"other",
")",
":",
"raise",
"NotImplementedError"
] | https://github.com/bendmorris/static-python/blob/2e0f8c4d7ed5b359dc7d8a75b6fb37e6b6c5c473/Lib/numbers.py#L77-L79 |
||
caiiiac/Machine-Learning-with-Python | 1a26c4467da41ca4ebc3d5bd789ea942ef79422f | MachineLearning/venv/lib/python3.5/site-packages/scipy/signal/filter_design.py | python | zpk2tf | (z, p, k) | return b, a | Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients. | Return polynomial transfer function representation from zeros and poles | [
"Return",
"polynomial",
"transfer",
"function",
"representation",
"from",
"zeros",
"and",
"poles"
] | def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a | [
"def",
"zpk2tf",
"(",
"z",
",",
"p",
",",
"k",
")",
":",
"z",
"=",
"atleast_1d",
"(",
"z",
")",
"k",
"=",
"atleast_1d",
"(",
"k",
")",
"if",
"len",
"(",
"z",
".",
"shape",
")",
">",
"1",
":",
"temp",
"=",
"poly",
"(",
"z",
"[",
"0",
"]",
")",
"b",
"=",
"zeros",
"(",
"(",
"z",
".",
"shape",
"[",
"0",
"]",
",",
"z",
".",
"shape",
"[",
"1",
"]",
"+",
"1",
")",
",",
"temp",
".",
"dtype",
".",
"char",
")",
"if",
"len",
"(",
"k",
")",
"==",
"1",
":",
"k",
"=",
"[",
"k",
"[",
"0",
"]",
"]",
"*",
"z",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"z",
".",
"shape",
"[",
"0",
"]",
")",
":",
"b",
"[",
"i",
"]",
"=",
"k",
"[",
"i",
"]",
"*",
"poly",
"(",
"z",
"[",
"i",
"]",
")",
"else",
":",
"b",
"=",
"k",
"*",
"poly",
"(",
"z",
")",
"a",
"=",
"atleast_1d",
"(",
"poly",
"(",
"p",
")",
")",
"# Use real output if possible. Copied from numpy.poly, since",
"# we can't depend on a specific version of numpy.",
"if",
"issubclass",
"(",
"b",
".",
"dtype",
".",
"type",
",",
"numpy",
".",
"complexfloating",
")",
":",
"# if complex roots are all complex conjugates, the roots are real.",
"roots",
"=",
"numpy",
".",
"asarray",
"(",
"z",
",",
"complex",
")",
"pos_roots",
"=",
"numpy",
".",
"compress",
"(",
"roots",
".",
"imag",
">",
"0",
",",
"roots",
")",
"neg_roots",
"=",
"numpy",
".",
"conjugate",
"(",
"numpy",
".",
"compress",
"(",
"roots",
".",
"imag",
"<",
"0",
",",
"roots",
")",
")",
"if",
"len",
"(",
"pos_roots",
")",
"==",
"len",
"(",
"neg_roots",
")",
":",
"if",
"numpy",
".",
"all",
"(",
"numpy",
".",
"sort_complex",
"(",
"neg_roots",
")",
"==",
"numpy",
".",
"sort_complex",
"(",
"pos_roots",
")",
")",
":",
"b",
"=",
"b",
".",
"real",
".",
"copy",
"(",
")",
"if",
"issubclass",
"(",
"a",
".",
"dtype",
".",
"type",
",",
"numpy",
".",
"complexfloating",
")",
":",
"# if complex roots are all complex conjugates, the roots are real.",
"roots",
"=",
"numpy",
".",
"asarray",
"(",
"p",
",",
"complex",
")",
"pos_roots",
"=",
"numpy",
".",
"compress",
"(",
"roots",
".",
"imag",
">",
"0",
",",
"roots",
")",
"neg_roots",
"=",
"numpy",
".",
"conjugate",
"(",
"numpy",
".",
"compress",
"(",
"roots",
".",
"imag",
"<",
"0",
",",
"roots",
")",
")",
"if",
"len",
"(",
"pos_roots",
")",
"==",
"len",
"(",
"neg_roots",
")",
":",
"if",
"numpy",
".",
"all",
"(",
"numpy",
".",
"sort_complex",
"(",
"neg_roots",
")",
"==",
"numpy",
".",
"sort_complex",
"(",
"pos_roots",
")",
")",
":",
"a",
"=",
"a",
".",
"real",
".",
"copy",
"(",
")",
"return",
"b",
",",
"a"
] | https://github.com/caiiiac/Machine-Learning-with-Python/blob/1a26c4467da41ca4ebc3d5bd789ea942ef79422f/MachineLearning/venv/lib/python3.5/site-packages/scipy/signal/filter_design.py#L907-L963 |
|
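A quick worked call of `zpk2tf` above: one zero at -1, poles at -2 and -3, gain 5.

    b, a = zpk2tf(z=[-1], p=[-2, -3], k=5)
    # b -> [5., 5.]      since 5*(s + 1)       = 5s + 5
    # a -> [1., 5., 6.]  since (s + 2)*(s + 3) = s^2 + 5s + 6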
ni/nidaqmx-python | 62fc6b48cbbb330fe1bcc9aedadc86610a1269b6 | nidaqmx/_task_modules/channels/ci_channel.py | python | CIChannel.ci_count_edges_count_dir_dig_fltr_timebase_rate | (self) | [] | def ci_count_edges_count_dir_dig_fltr_timebase_rate(self):
cfunc = (lib_importer.windll.
DAQmxResetCICountEdgesCountDirDigFltrTimebaseRate)
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str]
error_code = cfunc(
self._handle, self._name)
check_for_error(error_code) | [
"def",
"ci_count_edges_count_dir_dig_fltr_timebase_rate",
"(",
"self",
")",
":",
"cfunc",
"=",
"(",
"lib_importer",
".",
"windll",
".",
"DAQmxResetCICountEdgesCountDirDigFltrTimebaseRate",
")",
"if",
"cfunc",
".",
"argtypes",
"is",
"None",
":",
"with",
"cfunc",
".",
"arglock",
":",
"if",
"cfunc",
".",
"argtypes",
"is",
"None",
":",
"cfunc",
".",
"argtypes",
"=",
"[",
"lib_importer",
".",
"task_handle",
",",
"ctypes_byte_str",
"]",
"error_code",
"=",
"cfunc",
"(",
"self",
".",
"_handle",
",",
"self",
".",
"_name",
")",
"check_for_error",
"(",
"error_code",
")"
] | https://github.com/ni/nidaqmx-python/blob/62fc6b48cbbb330fe1bcc9aedadc86610a1269b6/nidaqmx/_task_modules/channels/ci_channel.py#L396-L407 |
||||
wwqgtxx/wwqLyParse | 33136508e52821babd9294fdecffbdf02d73a6fc | wwqLyParse/lib/flask_lib/werkzeug/urls.py | python | BytesURL.encode_netloc | (self) | return self.netloc | Returns the netloc unchanged as bytes. | Returns the netloc unchanged as bytes. | [
"Returns",
"the",
"netloc",
"unchanged",
"as",
"bytes",
"."
] | def encode_netloc(self):
"""Returns the netloc unchanged as bytes."""
return self.netloc | [
"def",
"encode_netloc",
"(",
"self",
")",
":",
"return",
"self",
".",
"netloc"
] | https://github.com/wwqgtxx/wwqLyParse/blob/33136508e52821babd9294fdecffbdf02d73a6fc/wwqLyParse/lib/flask_lib/werkzeug/urls.py#L344-L346 |
|
triaquae/triaquae | bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9 | TriAquae/models/Centos_5.9/paramiko/sftp_client.py | python | SFTPClient.stat | (self, path) | return SFTPAttributes._from_msg(msg) | Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a python C{stat} object, the result may not be accessed as a
tuple. This is mostly due to the author's slack factor.
The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
C{st_atime}, and C{st_mtime}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes | Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server. | [
"Retrieve",
"information",
"about",
"a",
"file",
"on",
"the",
"remote",
"system",
".",
"The",
"return",
"value",
"is",
"an",
"object",
"whose",
"attributes",
"correspond",
"to",
"the",
"attributes",
"of",
"python",
"s",
"C",
"{",
"stat",
"}",
"structure",
"as",
"returned",
"by",
"C",
"{",
"os",
".",
"stat",
"}",
"except",
"that",
"it",
"contains",
"fewer",
"fields",
".",
"An",
"SFTP",
"server",
"may",
"return",
"as",
"much",
"or",
"as",
"little",
"info",
"as",
"it",
"wants",
"so",
"the",
"results",
"may",
"vary",
"from",
"server",
"to",
"server",
"."
] | def stat(self, path):
"""
Retrieve information about a file on the remote system. The return
value is an object whose attributes correspond to the attributes of
python's C{stat} structure as returned by C{os.stat}, except that it
contains fewer fields. An SFTP server may return as much or as little
info as it wants, so the results may vary from server to server.
Unlike a python C{stat} object, the result may not be accessed as a
tuple. This is mostly due to the author's slack factor.
The fields supported are: C{st_mode}, C{st_size}, C{st_uid}, C{st_gid},
C{st_atime}, and C{st_mtime}.
@param path: the filename to stat
@type path: str
@return: an object containing attributes about the given file
@rtype: SFTPAttributes
"""
path = self._adjust_cwd(path)
self._log(DEBUG, 'stat(%r)' % path)
t, msg = self._request(CMD_STAT, path)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg) | [
"def",
"stat",
"(",
"self",
",",
"path",
")",
":",
"path",
"=",
"self",
".",
"_adjust_cwd",
"(",
"path",
")",
"self",
".",
"_log",
"(",
"DEBUG",
",",
"'stat(%r)'",
"%",
"path",
")",
"t",
",",
"msg",
"=",
"self",
".",
"_request",
"(",
"CMD_STAT",
",",
"path",
")",
"if",
"t",
"!=",
"CMD_ATTRS",
":",
"raise",
"SFTPError",
"(",
"'Expected attributes'",
")",
"return",
"SFTPAttributes",
".",
"_from_msg",
"(",
"msg",
")"
] | https://github.com/triaquae/triaquae/blob/bbabf736b3ba56a0c6498e7f04e16c13b8b8f2b9/TriAquae/models/Centos_5.9/paramiko/sftp_client.py#L316-L340 |
|
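Usage sketch against an open SFTP session (the `sftp` object is assumed to be a connected `SFTPClient`):

    attrs = sftp.stat("remote/path.txt")
    print(attrs.st_size, attrs.st_mtime)  # fields listed in the docstring above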
zhl2008/awd-platform | 0416b31abea29743387b10b3914581fbe8e7da5e | web_flaskbb/Python-2.7.9/Demo/curses/life.py | python | main | (stdscr) | [] | def main(stdscr):
keyloop(stdscr) | [
"def",
"main",
"(",
"stdscr",
")",
":",
"keyloop",
"(",
"stdscr",
")"
] | https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/Python-2.7.9/Demo/curses/life.py#L211-L212 |
||||
mautrix/telegram | 9f48eca5a6654bc38012cb761ecaaaf416aabdd0 | mautrix_telegram/portal.py | python | Portal._photo_size_key | (photo: TypePhotoSize) | [] | def _photo_size_key(photo: TypePhotoSize) -> int:
if isinstance(photo, PhotoSize):
return photo.size
elif isinstance(photo, PhotoSizeProgressive):
return max(photo.sizes)
elif isinstance(photo, PhotoSizeEmpty):
return 0
else:
return len(photo.bytes) | [
"def",
"_photo_size_key",
"(",
"photo",
":",
"TypePhotoSize",
")",
"->",
"int",
":",
"if",
"isinstance",
"(",
"photo",
",",
"PhotoSize",
")",
":",
"return",
"photo",
".",
"size",
"elif",
"isinstance",
"(",
"photo",
",",
"PhotoSizeProgressive",
")",
":",
"return",
"max",
"(",
"photo",
".",
"sizes",
")",
"elif",
"isinstance",
"(",
"photo",
",",
"PhotoSizeEmpty",
")",
":",
"return",
"0",
"else",
":",
"return",
"len",
"(",
"photo",
".",
"bytes",
")"
] | https://github.com/mautrix/telegram/blob/9f48eca5a6654bc38012cb761ecaaaf416aabdd0/mautrix_telegram/portal.py#L3125-L3133 |
||||
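`_photo_size_key` is a sort key, so a typical call site picks the largest available thumbnail. Sketch (`photo` is an assumed Telegram photo object with a `sizes` list):

    best_thumbnail = max(photo.sizes, key=Portal._photo_size_key)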
giampaolo/psutil | 55161bd4850986359a029f1c9a81bcf66f37afa8 | psutil/_common.py | python | memoize | (fun) | return wrapper | A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>> | A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache: | [
"A",
"simple",
"memoize",
"decorator",
"for",
"functions",
"supporting",
"(",
"hashable",
")",
"positional",
"arguments",
".",
"It",
"also",
"provides",
"a",
"cache_clear",
"()",
"function",
"for",
"clearing",
"the",
"cache",
":"
] | def memoize(fun):
"""A simple memoize decorator for functions supporting (hashable)
positional arguments.
It also provides a cache_clear() function for clearing the cache:
>>> @memoize
... def foo()
... return 1
...
>>> foo()
1
>>> foo.cache_clear()
>>>
"""
@functools.wraps(fun)
def wrapper(*args, **kwargs):
key = (args, frozenset(sorted(kwargs.items())))
try:
return cache[key]
except KeyError:
ret = cache[key] = fun(*args, **kwargs)
return ret
def cache_clear():
"""Clear cache."""
cache.clear()
cache = {}
wrapper.cache_clear = cache_clear
return wrapper | [
"def",
"memoize",
"(",
"fun",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fun",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"key",
"=",
"(",
"args",
",",
"frozenset",
"(",
"sorted",
"(",
"kwargs",
".",
"items",
"(",
")",
")",
")",
")",
"try",
":",
"return",
"cache",
"[",
"key",
"]",
"except",
"KeyError",
":",
"ret",
"=",
"cache",
"[",
"key",
"]",
"=",
"fun",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"ret",
"def",
"cache_clear",
"(",
")",
":",
"\"\"\"Clear cache.\"\"\"",
"cache",
".",
"clear",
"(",
")",
"cache",
"=",
"{",
"}",
"wrapper",
".",
"cache_clear",
"=",
"cache_clear",
"return",
"wrapper"
] | https://github.com/giampaolo/psutil/blob/55161bd4850986359a029f1c9a81bcf66f37afa8/psutil/_common.py#L383-L412 |
|
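Usage sketch matching the docstring above: positional arguments must be hashable, and the attached `cache_clear()` empties the cache.

    @memoize
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    fib(30)            # each distinct n is computed once
    fib.cache_clear()  # reset the cache, e.g. between test runs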
openshift/openshift-tools | 1188778e728a6e4781acf728123e5b356380fe6f | openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_edit.py | python | Utils._write | (filename, contents) | Actually write the file contents to disk. This helps with mocking. | Actually write the file contents to disk. This helps with mocking. | [
"Actually",
"write",
"the",
"file",
"contents",
"to",
"disk",
".",
"This",
"helps",
"with",
"mocking",
"."
] | def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents)) | [
"def",
"_write",
"(",
"filename",
",",
"contents",
")",
":",
"with",
"open",
"(",
"filename",
",",
"'w'",
")",
"as",
"sfd",
":",
"sfd",
".",
"write",
"(",
"str",
"(",
"contents",
")",
")"
] | https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.11.28-1/roles/lib_openshift/library/oc_edit.py#L1213-L1217 |