Column schema (name: type, observed length range): nwo: string, 5-86; sha: string, 40; path: string, 4-189; language: string, 1 distinct value; identifier: string, 1-94; parameters: string, 2-4.03k; argument_list: string, 1 distinct value; return_statement: string, 0-11.5k; docstring: string, 1-33.2k; docstring_summary: string, 0-5.15k; docstring_tokens: sequence; function: string, 34-151k; function_tokens: sequence; url: string, 90-278.

nwo | sha | path | language | identifier | parameters | argument_list | return_statement | docstring | docstring_summary | docstring_tokens | function | function_tokens | url
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
windystrife/UnrealEngine_NVIDIAGameWorks | b50e6338a7c5b26374d66306ebc7807541ff815e | Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/idlelib/RemoteDebugger.py | python | wrap_info | (info) | replace info[2], a traceback instance, by its ID | replace info[2], a traceback instance, by its ID | [
"replace",
"info",
"[",
"2",
"]",
"a",
"traceback",
"instance",
"by",
"its",
"ID"
] | def wrap_info(info):
"replace info[2], a traceback instance, by its ID"
if info is None:
return None
else:
traceback = info[2]
assert isinstance(traceback, types.TracebackType)
traceback_id = id(traceback)
tracebacktable[traceback_id] = traceback
modified_info = (info[0], info[1], traceback_id)
return modified_info | [
"def",
"wrap_info",
"(",
"info",
")",
":",
"if",
"info",
"is",
"None",
":",
"return",
"None",
"else",
":",
"traceback",
"=",
"info",
"[",
"2",
"]",
"assert",
"isinstance",
"(",
"traceback",
",",
"types",
".",
"TracebackType",
")",
"traceback_id",
"=",
"id",
"(",
"traceback",
")",
"tracebacktable",
"[",
"traceback_id",
"]",
"=",
"traceback",
"modified_info",
"=",
"(",
"info",
"[",
"0",
"]",
",",
"info",
"[",
"1",
"]",
",",
"traceback_id",
")",
"return",
"modified_info"
] | https://github.com/windystrife/UnrealEngine_NVIDIAGameWorks/blob/b50e6338a7c5b26374d66306ebc7807541ff815e/Engine/Extras/ThirdPartyNotUE/emsdk/Win64/python/2.7.5.3_64bit/Lib/idlelib/RemoteDebugger.py#L46-L56 |
||
SmartisanTech/Wrench | 27f3c17692910997bba3a3c9fd88c8717497aac6 | extra-cmake-modules/usr/share/ECM/find-modules/sip_generator.py | python | SipGenerator._container_get | (self, container, level, h_file, include_filename) | return body | Generate the (recursive) translation for a class or namespace.
:param container: A class or namespace.
:param h_file: Name of header file being processed.
:param level: Recursion level controls indentation.
:return: A string. | Generate the (recursive) translation for a class or namespace. | [
"Generate",
"the",
"(",
"recursive",
")",
"translation",
"for",
"a",
"class",
"or",
"namespace",
"."
] | def _container_get(self, container, level, h_file, include_filename):
"""
Generate the (recursive) translation for a class or namespace.
:param container: A class or namespace.
:param h_file: Name of header file being processed.
:param level: Recursion level controls indentation.
:return: A string.
"""
def skippable_attribute(member, text):
"""
We don't seem to have access to the __attribute__(())s, but at least we can look for stuff we care about.
:param member: The attribute.
:param text: The raw source corresponding to the region of member.
"""
if text.find("_DEPRECATED") != -1:
sip["annotations"].add("Deprecated")
return True
SipGenerator._report_ignoring(container, member, text)
sip = {
"name": container.displayname,
"annotations": set()
}
name = container.displayname
if container.access_specifier == AccessSpecifier.PRIVATE:
if self.dump_privates:
logger.debug("Ignoring private {}".format(SipGenerator.describe(container)))
return ""
body = ""
base_specifiers = []
template_type_parameters = []
had_copy_constructor = False
had_const_member = False
for member in container.get_children():
#
# Only emit items in the translation unit.
#
if member.location.file.name != self.tu.spelling:
continue
decl = ""
if member.kind in [CursorKind.CXX_METHOD, CursorKind.FUNCTION_DECL, CursorKind.FUNCTION_TEMPLATE,
CursorKind.CONSTRUCTOR, CursorKind.DESTRUCTOR, CursorKind.CONVERSION_FUNCTION]:
decl = self._fn_get(container, member, level + 1)
elif member.kind == CursorKind.ENUM_DECL:
decl = self._enum_get(container, member, level + 1) + ";\n"
elif member.kind == CursorKind.CXX_ACCESS_SPEC_DECL:
decl = self._get_access_specifier(member, level + 1)
elif member.kind == CursorKind.TYPEDEF_DECL:
decl = self._typedef_get(member, level + 1)
elif member.kind == CursorKind.CXX_BASE_SPECIFIER:
#
# Strip off the leading "class". Except for TypeKind.UNEXPOSED...
#
base_specifiers.append(member.displayname.split(None, 2)[-1])
elif member.kind == CursorKind.TEMPLATE_TYPE_PARAMETER:
template_type_parameters.append(member.displayname)
elif member.kind == CursorKind.TEMPLATE_NON_TYPE_PARAMETER:
template_type_parameters.append(member.type.spelling + " " + member.displayname)
elif member.kind in [CursorKind.VAR_DECL, CursorKind.FIELD_DECL]:
had_const_member = had_const_member or member.type.is_const_qualified()
decl = self._var_get(container, member, level + 1)
elif member.kind in [CursorKind.NAMESPACE, CursorKind.CLASS_DECL,
CursorKind.CLASS_TEMPLATE, CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION,
CursorKind.STRUCT_DECL, CursorKind.UNION_DECL]:
decl = self._container_get(member, level + 1, h_file, include_filename)
elif member.kind in TEMPLATE_KINDS + [CursorKind.USING_DECLARATION, CursorKind.USING_DIRECTIVE,
CursorKind.CXX_FINAL_ATTR]:
#
# Ignore:
#
# TEMPLATE_KINDS: Template type parameter.
# CursorKind.USING_DECLARATION, CursorKind.USING_DIRECTIVE: Using? Pah!
# CursorKind.CXX_FINAL_ATTR: Again, not much to be done with this.
#
pass
else:
SipGenerator._report_ignoring(container, member)
def is_copy_constructor(member):
if member.kind != CursorKind.CONSTRUCTOR:
return False
numParams = 0
hasSelfType = False
for child in member.get_children():
numParams += 1
if child.kind == CursorKind.PARM_DECL:
paramType = child.type.spelling
paramType = paramType.split("::")[-1]
paramType = paramType.replace("const", "").replace("&", "").strip()
hasSelfType = paramType == container.displayname
return numParams == 1 and hasSelfType
def has_parameter_default(parameter):
for member in parameter.get_children():
if member.kind.is_expression():
return True
return False
def is_default_constructor(member):
if member.kind != CursorKind.CONSTRUCTOR:
return False
numParams = 0
for parameter in member.get_children():
if (has_parameter_default(parameter)):
break
numParams += 1
return numParams == 0
had_copy_constructor = had_copy_constructor or is_copy_constructor(member)
#
# Discard almost anything which is private.
#
if member.access_specifier == AccessSpecifier.PRIVATE:
if member.kind == CursorKind.CXX_ACCESS_SPEC_DECL:
#
# We need these because...
#
pass
elif is_copy_constructor(member) or is_default_constructor(member):
#
# ...we need to pass private copy constructors to the SIP compiler.
#
pass
else:
if self.dump_privates:
logger.debug("Ignoring private {}".format(SipGenerator.describe(member)))
continue
if decl:
if self.verbose:
pad = " " * ((level + 1) * 4)
body += pad + "// {}\n".format(SipGenerator.describe(member))
body += decl
#
# Empty containers are still useful if they provide namespaces or forward declarations.
#
if not body and level >= 0:
body = "\n"
text = self._read_source(container.extent)
if not text.endswith("}"):
#
# Forward declaration.
#
sip["annotations"].add("External")
if body and level >= 0:
if container.kind == CursorKind.NAMESPACE:
container_type = "namespace " + name
elif container.kind in [CursorKind.CLASS_DECL, CursorKind.CLASS_TEMPLATE,
CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION]:
container_type = "class " + name
elif container.kind == CursorKind.STRUCT_DECL:
container_type = "struct " + name
elif container.kind == CursorKind.UNION_DECL:
container_type = "union " + name
else:
raise AssertionError(
_("Unexpected container {}: {}[{}]").format(container.kind, name, container.extent.start.line))
#
# Generate private copy constructor for non-copyable types.
#
if had_const_member and not had_copy_constructor:
body += " private:\n {}(const {} &); // Generated\n".format(name, container.type.get_canonical().spelling)
#
# Flesh out the SIP context for the rules engine.
#
sip["template_parameters"] = ", ".join(template_type_parameters)
sip["decl"] = container_type
sip["base_specifiers"] = ", ".join(base_specifiers)
sip["body"] = body
self.rules.container_rules().apply(container, sip)
pad = " " * (level * 4)
if sip["name"]:
decl = pad + sip["decl"]
if "External" in sip["annotations"]:
#
# SIP /External/ does not seem to work as one might wish. Suppress.
#
body = decl + " /External/;\n"
body = pad + "// Discarded {}\n".format(SipGenerator.describe(container))
else:
if sip["base_specifiers"]:
decl += ": " + sip["base_specifiers"]
if sip["annotations"]:
decl += " /" + ",".join(sip["annotations"]) + "/"
if sip["template_parameters"]:
decl = pad + "template <" + sip["template_parameters"] + ">\n" + decl
decl += "\n" + pad + "{\n"
decl += "%TypeHeaderCode\n#include <{}>\n%End\n".format(include_filename)
body = decl + sip["body"] + pad + "};\n"
else:
body = pad + "// Discarded {}\n".format(SipGenerator.describe(container))
return body | [
"def",
"_container_get",
"(",
"self",
",",
"container",
",",
"level",
",",
"h_file",
",",
"include_filename",
")",
":",
"def",
"skippable_attribute",
"(",
"member",
",",
"text",
")",
":",
"\"\"\"\n We don't seem to have access to the __attribute__(())s, but at least we can look for stuff we care about.\n\n :param member: The attribute.\n :param text: The raw source corresponding to the region of member.\n \"\"\"",
"if",
"text",
".",
"find",
"(",
"\"_DEPRECATED\"",
")",
"!=",
"-",
"1",
":",
"sip",
"[",
"\"annotations\"",
"]",
".",
"add",
"(",
"\"Deprecated\"",
")",
"return",
"True",
"SipGenerator",
".",
"_report_ignoring",
"(",
"container",
",",
"member",
",",
"text",
")",
"sip",
"=",
"{",
"\"name\"",
":",
"container",
".",
"displayname",
",",
"\"annotations\"",
":",
"set",
"(",
")",
"}",
"name",
"=",
"container",
".",
"displayname",
"if",
"container",
".",
"access_specifier",
"==",
"AccessSpecifier",
".",
"PRIVATE",
":",
"if",
"self",
".",
"dump_privates",
":",
"logger",
".",
"debug",
"(",
"\"Ignoring private {}\"",
".",
"format",
"(",
"SipGenerator",
".",
"describe",
"(",
"container",
")",
")",
")",
"return",
"\"\"",
"body",
"=",
"\"\"",
"base_specifiers",
"=",
"[",
"]",
"template_type_parameters",
"=",
"[",
"]",
"had_copy_constructor",
"=",
"False",
"had_const_member",
"=",
"False",
"for",
"member",
"in",
"container",
".",
"get_children",
"(",
")",
":",
"#",
"# Only emit items in the translation unit.",
"#",
"if",
"member",
".",
"location",
".",
"file",
".",
"name",
"!=",
"self",
".",
"tu",
".",
"spelling",
":",
"continue",
"decl",
"=",
"\"\"",
"if",
"member",
".",
"kind",
"in",
"[",
"CursorKind",
".",
"CXX_METHOD",
",",
"CursorKind",
".",
"FUNCTION_DECL",
",",
"CursorKind",
".",
"FUNCTION_TEMPLATE",
",",
"CursorKind",
".",
"CONSTRUCTOR",
",",
"CursorKind",
".",
"DESTRUCTOR",
",",
"CursorKind",
".",
"CONVERSION_FUNCTION",
"]",
":",
"decl",
"=",
"self",
".",
"_fn_get",
"(",
"container",
",",
"member",
",",
"level",
"+",
"1",
")",
"elif",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"ENUM_DECL",
":",
"decl",
"=",
"self",
".",
"_enum_get",
"(",
"container",
",",
"member",
",",
"level",
"+",
"1",
")",
"+",
"\";\\n\"",
"elif",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"CXX_ACCESS_SPEC_DECL",
":",
"decl",
"=",
"self",
".",
"_get_access_specifier",
"(",
"member",
",",
"level",
"+",
"1",
")",
"elif",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"TYPEDEF_DECL",
":",
"decl",
"=",
"self",
".",
"_typedef_get",
"(",
"member",
",",
"level",
"+",
"1",
")",
"elif",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"CXX_BASE_SPECIFIER",
":",
"#",
"# Strip off the leading \"class\". Except for TypeKind.UNEXPOSED...",
"#",
"base_specifiers",
".",
"append",
"(",
"member",
".",
"displayname",
".",
"split",
"(",
"None",
",",
"2",
")",
"[",
"-",
"1",
"]",
")",
"elif",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"TEMPLATE_TYPE_PARAMETER",
":",
"template_type_parameters",
".",
"append",
"(",
"member",
".",
"displayname",
")",
"elif",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"TEMPLATE_NON_TYPE_PARAMETER",
":",
"template_type_parameters",
".",
"append",
"(",
"member",
".",
"type",
".",
"spelling",
"+",
"\" \"",
"+",
"member",
".",
"displayname",
")",
"elif",
"member",
".",
"kind",
"in",
"[",
"CursorKind",
".",
"VAR_DECL",
",",
"CursorKind",
".",
"FIELD_DECL",
"]",
":",
"had_const_member",
"=",
"had_const_member",
"or",
"member",
".",
"type",
".",
"is_const_qualified",
"(",
")",
"decl",
"=",
"self",
".",
"_var_get",
"(",
"container",
",",
"member",
",",
"level",
"+",
"1",
")",
"elif",
"member",
".",
"kind",
"in",
"[",
"CursorKind",
".",
"NAMESPACE",
",",
"CursorKind",
".",
"CLASS_DECL",
",",
"CursorKind",
".",
"CLASS_TEMPLATE",
",",
"CursorKind",
".",
"CLASS_TEMPLATE_PARTIAL_SPECIALIZATION",
",",
"CursorKind",
".",
"STRUCT_DECL",
",",
"CursorKind",
".",
"UNION_DECL",
"]",
":",
"decl",
"=",
"self",
".",
"_container_get",
"(",
"member",
",",
"level",
"+",
"1",
",",
"h_file",
",",
"include_filename",
")",
"elif",
"member",
".",
"kind",
"in",
"TEMPLATE_KINDS",
"+",
"[",
"CursorKind",
".",
"USING_DECLARATION",
",",
"CursorKind",
".",
"USING_DIRECTIVE",
",",
"CursorKind",
".",
"CXX_FINAL_ATTR",
"]",
":",
"#",
"# Ignore:",
"#",
"# TEMPLATE_KINDS: Template type parameter.",
"# CursorKind.USING_DECLARATION, CursorKind.USING_DIRECTIVE: Using? Pah!",
"# CursorKind.CXX_FINAL_ATTR: Again, not much to be done with this.",
"#",
"pass",
"else",
":",
"SipGenerator",
".",
"_report_ignoring",
"(",
"container",
",",
"member",
")",
"def",
"is_copy_constructor",
"(",
"member",
")",
":",
"if",
"member",
".",
"kind",
"!=",
"CursorKind",
".",
"CONSTRUCTOR",
":",
"return",
"False",
"numParams",
"=",
"0",
"hasSelfType",
"=",
"False",
"for",
"child",
"in",
"member",
".",
"get_children",
"(",
")",
":",
"numParams",
"+=",
"1",
"if",
"child",
".",
"kind",
"==",
"CursorKind",
".",
"PARM_DECL",
":",
"paramType",
"=",
"child",
".",
"type",
".",
"spelling",
"paramType",
"=",
"paramType",
".",
"split",
"(",
"\"::\"",
")",
"[",
"-",
"1",
"]",
"paramType",
"=",
"paramType",
".",
"replace",
"(",
"\"const\"",
",",
"\"\"",
")",
".",
"replace",
"(",
"\"&\"",
",",
"\"\"",
")",
".",
"strip",
"(",
")",
"hasSelfType",
"=",
"paramType",
"==",
"container",
".",
"displayname",
"return",
"numParams",
"==",
"1",
"and",
"hasSelfType",
"def",
"has_parameter_default",
"(",
"parameter",
")",
":",
"for",
"member",
"in",
"parameter",
".",
"get_children",
"(",
")",
":",
"if",
"member",
".",
"kind",
".",
"is_expression",
"(",
")",
":",
"return",
"True",
"return",
"False",
"def",
"is_default_constructor",
"(",
"member",
")",
":",
"if",
"member",
".",
"kind",
"!=",
"CursorKind",
".",
"CONSTRUCTOR",
":",
"return",
"False",
"numParams",
"=",
"0",
"for",
"parameter",
"in",
"member",
".",
"get_children",
"(",
")",
":",
"if",
"(",
"has_parameter_default",
"(",
"parameter",
")",
")",
":",
"break",
"numParams",
"+=",
"1",
"return",
"numParams",
"==",
"0",
"had_copy_constructor",
"=",
"had_copy_constructor",
"or",
"is_copy_constructor",
"(",
"member",
")",
"#",
"# Discard almost anything which is private.",
"#",
"if",
"member",
".",
"access_specifier",
"==",
"AccessSpecifier",
".",
"PRIVATE",
":",
"if",
"member",
".",
"kind",
"==",
"CursorKind",
".",
"CXX_ACCESS_SPEC_DECL",
":",
"#",
"# We need these because...",
"#",
"pass",
"elif",
"is_copy_constructor",
"(",
"member",
")",
"or",
"is_default_constructor",
"(",
"member",
")",
":",
"#",
"# ...we need to pass private copy contructors to the SIP compiler.",
"#",
"pass",
"else",
":",
"if",
"self",
".",
"dump_privates",
":",
"logger",
".",
"debug",
"(",
"\"Ignoring private {}\"",
".",
"format",
"(",
"SipGenerator",
".",
"describe",
"(",
"member",
")",
")",
")",
"continue",
"if",
"decl",
":",
"if",
"self",
".",
"verbose",
":",
"pad",
"=",
"\" \"",
"*",
"(",
"(",
"level",
"+",
"1",
")",
"*",
"4",
")",
"body",
"+=",
"pad",
"+",
"\"// {}\\n\"",
".",
"format",
"(",
"SipGenerator",
".",
"describe",
"(",
"member",
")",
")",
"body",
"+=",
"decl",
"#",
"# Empty containers are still useful if they provide namespaces or forward declarations.",
"#",
"if",
"not",
"body",
"and",
"level",
">=",
"0",
":",
"body",
"=",
"\"\\n\"",
"text",
"=",
"self",
".",
"_read_source",
"(",
"container",
".",
"extent",
")",
"if",
"not",
"text",
".",
"endswith",
"(",
"\"}\"",
")",
":",
"#",
"# Forward declaration.",
"#",
"sip",
"[",
"\"annotations\"",
"]",
".",
"add",
"(",
"\"External\"",
")",
"if",
"body",
"and",
"level",
">=",
"0",
":",
"if",
"container",
".",
"kind",
"==",
"CursorKind",
".",
"NAMESPACE",
":",
"container_type",
"=",
"\"namespace \"",
"+",
"name",
"elif",
"container",
".",
"kind",
"in",
"[",
"CursorKind",
".",
"CLASS_DECL",
",",
"CursorKind",
".",
"CLASS_TEMPLATE",
",",
"CursorKind",
".",
"CLASS_TEMPLATE_PARTIAL_SPECIALIZATION",
"]",
":",
"container_type",
"=",
"\"class \"",
"+",
"name",
"elif",
"container",
".",
"kind",
"==",
"CursorKind",
".",
"STRUCT_DECL",
":",
"container_type",
"=",
"\"struct \"",
"+",
"name",
"elif",
"container",
".",
"kind",
"==",
"CursorKind",
".",
"UNION_DECL",
":",
"container_type",
"=",
"\"union \"",
"+",
"name",
"else",
":",
"raise",
"AssertionError",
"(",
"_",
"(",
"\"Unexpected container {}: {}[{}]\"",
")",
".",
"format",
"(",
"container",
".",
"kind",
",",
"name",
",",
"container",
".",
"extent",
".",
"start",
".",
"line",
")",
")",
"#",
"# Generate private copy constructor for non-copyable types.",
"#",
"if",
"had_const_member",
"and",
"not",
"had_copy_constructor",
":",
"body",
"+=",
"\" private:\\n {}(const {} &); // Generated\\n\"",
".",
"format",
"(",
"name",
",",
"container",
".",
"type",
".",
"get_canonical",
"(",
")",
".",
"spelling",
")",
"#",
"# Flesh out the SIP context for the rules engine.",
"#",
"sip",
"[",
"\"template_parameters\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"template_type_parameters",
")",
"sip",
"[",
"\"decl\"",
"]",
"=",
"container_type",
"sip",
"[",
"\"base_specifiers\"",
"]",
"=",
"\", \"",
".",
"join",
"(",
"base_specifiers",
")",
"sip",
"[",
"\"body\"",
"]",
"=",
"body",
"self",
".",
"rules",
".",
"container_rules",
"(",
")",
".",
"apply",
"(",
"container",
",",
"sip",
")",
"pad",
"=",
"\" \"",
"*",
"(",
"level",
"*",
"4",
")",
"if",
"sip",
"[",
"\"name\"",
"]",
":",
"decl",
"=",
"pad",
"+",
"sip",
"[",
"\"decl\"",
"]",
"if",
"\"External\"",
"in",
"sip",
"[",
"\"annotations\"",
"]",
":",
"#",
"# SIP /External/ does not seem to work as one might wish. Suppress.",
"#",
"body",
"=",
"decl",
"+",
"\" /External/;\\n\"",
"body",
"=",
"pad",
"+",
"\"// Discarded {}\\n\"",
".",
"format",
"(",
"SipGenerator",
".",
"describe",
"(",
"container",
")",
")",
"else",
":",
"if",
"sip",
"[",
"\"base_specifiers\"",
"]",
":",
"decl",
"+=",
"\": \"",
"+",
"sip",
"[",
"\"base_specifiers\"",
"]",
"if",
"sip",
"[",
"\"annotations\"",
"]",
":",
"decl",
"+=",
"\" /\"",
"+",
"\",\"",
".",
"join",
"(",
"sip",
"[",
"\"annotations\"",
"]",
")",
"+",
"\"/\"",
"if",
"sip",
"[",
"\"template_parameters\"",
"]",
":",
"decl",
"=",
"pad",
"+",
"\"template <\"",
"+",
"sip",
"[",
"\"template_parameters\"",
"]",
"+",
"\">\\n\"",
"+",
"decl",
"decl",
"+=",
"\"\\n\"",
"+",
"pad",
"+",
"\"{\\n\"",
"decl",
"+=",
"\"%TypeHeaderCode\\n#include <{}>\\n%End\\n\"",
".",
"format",
"(",
"include_filename",
")",
"body",
"=",
"decl",
"+",
"sip",
"[",
"\"body\"",
"]",
"+",
"pad",
"+",
"\"};\\n\"",
"else",
":",
"body",
"=",
"pad",
"+",
"\"// Discarded {}\\n\"",
".",
"format",
"(",
"SipGenerator",
".",
"describe",
"(",
"container",
")",
")",
"return",
"body"
] | https://github.com/SmartisanTech/Wrench/blob/27f3c17692910997bba3a3c9fd88c8717497aac6/extra-cmake-modules/usr/share/ECM/find-modules/sip_generator.py#L135-L329 |
|
snap-stanford/snap-python | d53c51b0a26aa7e3e7400b014cdf728948fde80a | setup/snap.py | python | TIntIntVH.__init__ | (self, *args) | __init__(THash<(TInt,TVec<(TInt,int)>)> self) -> TIntIntVH
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TIntIntVH Hash) -> TIntIntVH
Parameters:
Hash: THash< TInt,TVec< TInt,int > > const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, int const & ExpectVals, bool const & _AutoSizeP=False) -> TIntIntVH
Parameters:
ExpectVals: int const &
_AutoSizeP: bool const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, int const & ExpectVals) -> TIntIntVH
Parameters:
ExpectVals: int const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TSIn SIn) -> TIntIntVH
Parameters:
SIn: TSIn & | __init__(THash<(TInt,TVec<(TInt,int)>)> self) -> TIntIntVH
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TIntIntVH Hash) -> TIntIntVH | [
"__init__",
"(",
"THash<",
"(",
"TInt",
"TVec<",
"(",
"TInt",
"int",
")",
">",
")",
">",
"self",
")",
"-",
">",
"TIntIntVH",
"__init__",
"(",
"THash<",
"(",
"TInt",
"TVec<",
"(",
"TInt",
"int",
")",
">",
")",
">",
"self",
"TIntIntVH",
"Hash",
")",
"-",
">",
"TIntIntVH"
] | def __init__(self, *args):
"""
__init__(THash<(TInt,TVec<(TInt,int)>)> self) -> TIntIntVH
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TIntIntVH Hash) -> TIntIntVH
Parameters:
Hash: THash< TInt,TVec< TInt,int > > const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, int const & ExpectVals, bool const & _AutoSizeP=False) -> TIntIntVH
Parameters:
ExpectVals: int const &
_AutoSizeP: bool const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, int const & ExpectVals) -> TIntIntVH
Parameters:
ExpectVals: int const &
__init__(THash<(TInt,TVec<(TInt,int)>)> self, TSIn SIn) -> TIntIntVH
Parameters:
SIn: TSIn &
"""
_snap.TIntIntVH_swiginit(self,_snap.new_TIntIntVH(*args)) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
")",
":",
"_snap",
".",
"TIntIntVH_swiginit",
"(",
"self",
",",
"_snap",
".",
"new_TIntIntVH",
"(",
"*",
"args",
")",
")"
] | https://github.com/snap-stanford/snap-python/blob/d53c51b0a26aa7e3e7400b014cdf728948fde80a/setup/snap.py#L17638-L17663 |
||
baidu-research/tensorflow-allreduce | 66d5b855e90b0949e9fa5cca5599fd729a70e874 | tensorflow/contrib/data/python/ops/dataset_ops.py | python | RangeDataset.__init__ | (self, *args) | See `Dataset.range()` for details. | See `Dataset.range()` for details. | [
"See",
"Dataset",
".",
"range",
"()",
"for",
"details",
"."
] | def __init__(self, *args):
"""See `Dataset.range()` for details."""
super(RangeDataset, self).__init__()
self._parse_args(*args) | [
"def",
"__init__",
"(",
"self",
",",
"*",
"args",
")",
":",
"super",
"(",
"RangeDataset",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_parse_args",
"(",
"*",
"args",
")"
] | https://github.com/baidu-research/tensorflow-allreduce/blob/66d5b855e90b0949e9fa5cca5599fd729a70e874/tensorflow/contrib/data/python/ops/dataset_ops.py#L1274-L1277 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/git.py | python | Git.has_commit | (cls, location, rev) | Check if rev is a commit that is available in the local repository. | [] | def has_commit(cls, location, rev):
"""
Check if rev is a commit that is available in the local repository.
"""
try:
cls.run_command(
['rev-parse', '-q', '--verify', "sha^" + rev],
cwd=location,
log_failed_cmd=False,
)
except InstallationError:
return False
else:
return True | [
"def",
"has_commit",
"(",
"cls",
",",
"location",
",",
"rev",
")",
":",
"try",
":",
"cls",
".",
"run_command",
"(",
"[",
"'rev-parse'",
",",
"'-q'",
",",
"'--verify'",
",",
"\"sha^\"",
"+",
"rev",
"]",
",",
"cwd",
"=",
"location",
",",
"log_failed_cmd",
"=",
"False",
",",
")",
"except",
"InstallationError",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/site-packages/pip/_internal/vcs/git.py#L683-L709 |
|||
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/cpp.py | python | get_line_width | (line) | return len(line) | Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters. | Determines the width of the line in column positions. | [
"Determines",
"the",
"width",
"of",
"the",
"line",
"in",
"column",
"positions",
"."
] | def get_line_width(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for c in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(c) in ('W', 'F'):
width += 2
elif not unicodedata.combining(c):
width += 1
return width
return len(line) | [
"def",
"get_line_width",
"(",
"line",
")",
":",
"if",
"isinstance",
"(",
"line",
",",
"unicode",
")",
":",
"width",
"=",
"0",
"for",
"c",
"in",
"unicodedata",
".",
"normalize",
"(",
"'NFC'",
",",
"line",
")",
":",
"if",
"unicodedata",
".",
"east_asian_width",
"(",
"c",
")",
"in",
"(",
"'W'",
",",
"'F'",
")",
":",
"width",
"+=",
"2",
"elif",
"not",
"unicodedata",
".",
"combining",
"(",
"c",
")",
":",
"width",
"+=",
"1",
"return",
"width",
"return",
"len",
"(",
"line",
")"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/cpp.py#L2664-L2682 |
|
lmb-freiburg/ogn | 974f72ef4bf840d6f6693d22d1843a79223e77ce | scripts/cpp_lint.py | python | CleanseComments | (line) | return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) | Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed. | Removes //-comments and single-line C-style /* */ comments. | [
"Removes",
"//",
"-",
"comments",
"and",
"single",
"-",
"line",
"C",
"-",
"style",
"/",
"*",
"*",
"/",
"comments",
"."
] | def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) | [
"def",
"CleanseComments",
"(",
"line",
")",
":",
"commentpos",
"=",
"line",
".",
"find",
"(",
"'//'",
")",
"if",
"commentpos",
"!=",
"-",
"1",
"and",
"not",
"IsCppString",
"(",
"line",
"[",
":",
"commentpos",
"]",
")",
":",
"line",
"=",
"line",
"[",
":",
"commentpos",
"]",
".",
"rstrip",
"(",
")",
"# get rid of /* ... */",
"return",
"_RE_PATTERN_CLEANSE_LINE_C_COMMENTS",
".",
"sub",
"(",
"''",
",",
"line",
")"
] | https://github.com/lmb-freiburg/ogn/blob/974f72ef4bf840d6f6693d22d1843a79223e77ce/scripts/cpp_lint.py#L1167-L1180 |
|
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/corrections_tab_widget/corrections_presenter.py | python | CorrectionsPresenter.handle_ads_clear_or_remove_workspace_event | (self, _: str = None) | Handle when there is a clear or remove workspace event in the ADS. | Handle when there is a clear or remove workspace event in the ADS. | [
"Handle",
"when",
"there",
"is",
"a",
"clear",
"or",
"remove",
"workspace",
"event",
"in",
"the",
"ADS",
"."
] | def handle_ads_clear_or_remove_workspace_event(self, _: str = None) -> None:
"""Handle when there is a clear or remove workspace event in the ADS."""
self.dead_time_presenter.handle_ads_clear_or_remove_workspace_event()
self.handle_runs_loaded() | [
"def",
"handle_ads_clear_or_remove_workspace_event",
"(",
"self",
",",
"_",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"self",
".",
"dead_time_presenter",
".",
"handle_ads_clear_or_remove_workspace_event",
"(",
")",
"self",
".",
"handle_runs_loaded",
"(",
")"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/Muon/GUI/Common/corrections_tab_widget/corrections_presenter.py#L64-L67 |
||
okex/V3-Open-API-SDK | c5abb0db7e2287718e0055e17e57672ce0ec7fd9 | okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/resolve.py | python | Resolver._check_skip_installed | (self, req_to_install) | return None | Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None. | Check if req_to_install should be skipped. | [
"Check",
"if",
"req_to_install",
"should",
"be",
"skipped",
"."
] | def _check_skip_installed(self, req_to_install):
# type: (InstallRequirement) -> Optional[str]
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
if self.ignore_installed:
return None
req_to_install.check_if_exists(self.use_user_site)
if not req_to_install.satisfied_by:
return None
if self.force_reinstall:
self._set_req_to_reinstall(req_to_install)
return None
if not self._is_upgrade_allowed(req_to_install):
if self.upgrade_strategy == "only-if-needed":
return 'already satisfied, skipping upgrade'
return 'already satisfied'
# Check for the possibility of an upgrade. For link-based
# requirements we have to pull the tree down and inspect to assess
# the version #, so it's handled way down.
if not req_to_install.link:
try:
self.finder.find_requirement(req_to_install, upgrade=True)
except BestVersionAlreadyInstalled:
# Then the best version is installed.
return 'already up-to-date'
except DistributionNotFound:
# No distribution found, so we squash the error. It will
# be raised later when we re-try later to do the install.
# Why don't we just raise here?
pass
self._set_req_to_reinstall(req_to_install)
return None | [
"def",
"_check_skip_installed",
"(",
"self",
",",
"req_to_install",
")",
":",
"# type: (InstallRequirement) -> Optional[str]",
"if",
"self",
".",
"ignore_installed",
":",
"return",
"None",
"req_to_install",
".",
"check_if_exists",
"(",
"self",
".",
"use_user_site",
")",
"if",
"not",
"req_to_install",
".",
"satisfied_by",
":",
"return",
"None",
"if",
"self",
".",
"force_reinstall",
":",
"self",
".",
"_set_req_to_reinstall",
"(",
"req_to_install",
")",
"return",
"None",
"if",
"not",
"self",
".",
"_is_upgrade_allowed",
"(",
"req_to_install",
")",
":",
"if",
"self",
".",
"upgrade_strategy",
"==",
"\"only-if-needed\"",
":",
"return",
"'already satisfied, skipping upgrade'",
"return",
"'already satisfied'",
"# Check for the possibility of an upgrade. For link-based",
"# requirements we have to pull the tree down and inspect to assess",
"# the version #, so it's handled way down.",
"if",
"not",
"req_to_install",
".",
"link",
":",
"try",
":",
"self",
".",
"finder",
".",
"find_requirement",
"(",
"req_to_install",
",",
"upgrade",
"=",
"True",
")",
"except",
"BestVersionAlreadyInstalled",
":",
"# Then the best version is installed.",
"return",
"'already up-to-date'",
"except",
"DistributionNotFound",
":",
"# No distribution found, so we squash the error. It will",
"# be raised later when we re-try later to do the install.",
"# Why don't we just raise here?",
"pass",
"self",
".",
"_set_req_to_reinstall",
"(",
"req_to_install",
")",
"return",
"None"
] | https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/resolve.py#L162-L213 |
|
quantOS-org/DataCore | e2ef9bd2c22ee9e2845675b6435a14fa607f3551 | mdlink/deps/windows/protobuf-2.5.0/python/google/protobuf/internal/containers.py | python | BaseContainer.__init__ | (self, message_listener) | Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
Modified() method when it is modified. | Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
Modified() method when it is modified. | [
"Args",
":",
"message_listener",
":",
"A",
"MessageListener",
"implementation",
".",
"The",
"RepeatedScalarFieldContainer",
"will",
"call",
"this",
"object",
"s",
"Modified",
"()",
"method",
"when",
"it",
"is",
"modified",
"."
] | def __init__(self, message_listener):
"""
Args:
message_listener: A MessageListener implementation.
The RepeatedScalarFieldContainer will call this object's
Modified() method when it is modified.
"""
self._message_listener = message_listener
self._values = [] | [
"def",
"__init__",
"(",
"self",
",",
"message_listener",
")",
":",
"self",
".",
"_message_listener",
"=",
"message_listener",
"self",
".",
"_values",
"=",
"[",
"]"
] | https://github.com/quantOS-org/DataCore/blob/e2ef9bd2c22ee9e2845675b6435a14fa607f3551/mdlink/deps/windows/protobuf-2.5.0/python/google/protobuf/internal/containers.py#L52-L60 |
||
gnuradio/gr-inspector | 1aa1008b257c9a915b3e29a75092021468a4fe43 | docs/doxygen/doxyxml/generated/index.py | python | DoxygenTypeSub.find_compounds_and_members | (self, details) | return results | Returns a list of all compounds and their members which match details | Returns a list of all compounds and their members which match details | [
"Returns",
"a",
"list",
"of",
"all",
"compounds",
"and",
"their",
"members",
"which",
"match",
"details"
] | def find_compounds_and_members(self, details):
"""
Returns a list of all compounds and their members which match details
"""
results = []
for compound in self.compound:
members = compound.find_members(details)
if members:
results.append([compound, members])
else:
if details.match(compound):
results.append([compound, []])
return results | [
"def",
"find_compounds_and_members",
"(",
"self",
",",
"details",
")",
":",
"results",
"=",
"[",
"]",
"for",
"compound",
"in",
"self",
".",
"compound",
":",
"members",
"=",
"compound",
".",
"find_members",
"(",
"details",
")",
"if",
"members",
":",
"results",
".",
"append",
"(",
"[",
"compound",
",",
"members",
"]",
")",
"else",
":",
"if",
"details",
".",
"match",
"(",
"compound",
")",
":",
"results",
".",
"append",
"(",
"[",
"compound",
",",
"[",
"]",
"]",
")",
"return",
"results"
] | https://github.com/gnuradio/gr-inspector/blob/1aa1008b257c9a915b3e29a75092021468a4fe43/docs/doxygen/doxyxml/generated/index.py#L21-L35 |
|
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBValue.GetChildAtIndex | (self, *args) | return _lldb.SBValue_GetChildAtIndex(self, *args) | GetChildAtIndex(self, uint32_t idx) -> SBValue
GetChildAtIndex(self, uint32_t idx, DynamicValueType use_dynamic, bool can_create_synthetic) -> SBValue
Get a child value by index from a value.
Structs, unions, classes, arrays and pointers have child
values that can be accessed by index.
Structs and unions access child members using a zero based index
for each child member. For
Classes reserve the first indexes for base classes that have
members (empty base classes are omitted), and all members of the
current class will then follow the base classes.
Pointers differ depending on what they point to. If the pointer
points to a simple type, the child at index zero
is the only child value available, unless synthetic_allowed
is true, in which case the pointer will be used as an array
and can create 'synthetic' child values using positive or
negative indexes. If the pointer points to an aggregate type
(an array, class, union, struct), then the pointee is
transparently skipped and any children are going to be the indexes
of the child values within the aggregate type. For example if
we have a 'Point' type and we have a SBValue that contains a
pointer to a 'Point' type, then the child at index zero will be
the 'x' member, and the child at index 1 will be the 'y' member
(the child at index zero won't be a 'Point' instance).
If you actually need an SBValue that represents the type pointed
to by a SBValue for which GetType().IsPointeeType() returns true,
regardless of the pointee type, you can do that with the SBValue.Dereference
method (or the equivalent deref property).
Arrays have a preset number of children that can be accessed by
index and will return invalid child values for indexes that are
out of bounds unless the synthetic_allowed is true. In this
case the array can create 'synthetic' child values for indexes
that aren't in the array bounds using positive or negative
indexes.
@param[in] idx
The index of the child value to get
@param[in] use_dynamic
An enumeration that specifies whether to get dynamic values,
and also if the target can be run to figure out the dynamic
type of the child value.
@param[in] synthetic_allowed
If true, then allow child values to be created by index
for pointers and arrays for indexes that normally wouldn't
be allowed.
@return
A new SBValue object that represents the child member value. | GetChildAtIndex(self, uint32_t idx) -> SBValue
GetChildAtIndex(self, uint32_t idx, DynamicValueType use_dynamic, bool can_create_synthetic) -> SBValue | [
"GetChildAtIndex",
"(",
"self",
"uint32_t",
"idx",
")",
"-",
">",
"SBValue",
"GetChildAtIndex",
"(",
"self",
"uint32_t",
"idx",
"DynamicValueType",
"use_dynamic",
"bool",
"can_create_synthetic",
")",
"-",
">",
"SBValue"
] | def GetChildAtIndex(self, *args):
"""
GetChildAtIndex(self, uint32_t idx) -> SBValue
GetChildAtIndex(self, uint32_t idx, DynamicValueType use_dynamic, bool can_create_synthetic) -> SBValue
Get a child value by index from a value.
Structs, unions, classes, arrays and pointers have child
values that can be accessed by index.
Structs and unions access child members using a zero based index
for each child member. For
Classes reserve the first indexes for base classes that have
members (empty base classes are omitted), and all members of the
current class will then follow the base classes.
Pointers differ depending on what they point to. If the pointer
points to a simple type, the child at index zero
is the only child value available, unless synthetic_allowed
is true, in which case the pointer will be used as an array
and can create 'synthetic' child values using positive or
negative indexes. If the pointer points to an aggregate type
(an array, class, union, struct), then the pointee is
transparently skipped and any children are going to be the indexes
of the child values within the aggregate type. For example if
we have a 'Point' type and we have a SBValue that contains a
pointer to a 'Point' type, then the child at index zero will be
the 'x' member, and the child at index 1 will be the 'y' member
(the child at index zero won't be a 'Point' instance).
If you actually need an SBValue that represents the type pointed
to by a SBValue for which GetType().IsPointeeType() returns true,
regardless of the pointee type, you can do that with the SBValue.Dereference
method (or the equivalent deref property).
Arrays have a preset number of children that can be accessed by
index and will return invalid child values for indexes that are
out of bounds unless the synthetic_allowed is true. In this
case the array can create 'synthetic' child values for indexes
that aren't in the array bounds using positive or negative
indexes.
@param[in] idx
The index of the child value to get
@param[in] use_dynamic
An enumeration that specifies whether to get dynamic values,
and also if the target can be run to figure out the dynamic
type of the child value.
@param[in] synthetic_allowed
If true, then allow child values to be created by index
for pointers and arrays for indexes that normally wouldn't
be allowed.
@return
A new SBValue object that represents the child member value.
"""
return _lldb.SBValue_GetChildAtIndex(self, *args) | [
"def",
"GetChildAtIndex",
"(",
"self",
",",
"*",
"args",
")",
":",
"return",
"_lldb",
".",
"SBValue_GetChildAtIndex",
"(",
"self",
",",
"*",
"args",
")"
] | https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L11951-L12010 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/richtext.py | python | RichTextParagraphLayoutBox.MoveAnchoredObjectToParagraph | (*args, **kwargs) | return _richtext.RichTextParagraphLayoutBox_MoveAnchoredObjectToParagraph(*args, **kwargs) | MoveAnchoredObjectToParagraph(self, RichTextParagraph from, RichTextParagraph to, RichTextObject obj) | MoveAnchoredObjectToParagraph(self, RichTextParagraph from, RichTextParagraph to, RichTextObject obj) | [
"MoveAnchoredObjectToParagraph",
"(",
"self",
"RichTextParagraph",
"from",
"RichTextParagraph",
"to",
"RichTextObject",
"obj",
")"
] | def MoveAnchoredObjectToParagraph(*args, **kwargs):
"""MoveAnchoredObjectToParagraph(self, RichTextParagraph from, RichTextParagraph to, RichTextObject obj)"""
return _richtext.RichTextParagraphLayoutBox_MoveAnchoredObjectToParagraph(*args, **kwargs) | [
"def",
"MoveAnchoredObjectToParagraph",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_richtext",
".",
"RichTextParagraphLayoutBox_MoveAnchoredObjectToParagraph",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/richtext.py#L1640-L1642 |
|
Yelp/MOE | 5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c | moe/easy_interface/experiment.py | python | Experiment.__str__ | (self) | return pprint.pformat(self.build_json_payload) | Return a pprint formated version of the experiment dict. | Return a pprint formated version of the experiment dict. | [
"Return",
"a",
"pprint",
"formated",
"version",
"of",
"the",
"experiment",
"dict",
"."
] | def __str__(self):
"""Return a pprint formated version of the experiment dict."""
return pprint.pformat(self.build_json_payload) | [
"def",
"__str__",
"(",
"self",
")",
":",
"return",
"pprint",
".",
"pformat",
"(",
"self",
".",
"build_json_payload",
")"
] | https://github.com/Yelp/MOE/blob/5b5a6a2c6c3cf47320126f7f5894e2a83e347f5c/moe/easy_interface/experiment.py#L42-L44 |
|
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/dataset/engine/samplers.py | python | SequentialSampler.parse_for_minddataset | (self) | return c_sampler | Parse the sampler for MindRecord. | Parse the sampler for MindRecord. | [
"Parse",
"the",
"sampler",
"for",
"MindRecord",
"."
] | def parse_for_minddataset(self):
"""Parse the sampler for MindRecord."""
start_index = self.start_index if self.start_index is not None else 0
num_samples = self.num_samples if self.num_samples is not None else 0
c_sampler = cde.MindrecordSequentialSampler(num_samples, start_index)
c_child_sampler = self.parse_child_for_minddataset()
c_sampler.add_child(c_child_sampler)
c_sampler.set_num_samples(num_samples)
return c_sampler | [
"def",
"parse_for_minddataset",
"(",
"self",
")",
":",
"start_index",
"=",
"self",
".",
"start_index",
"if",
"self",
".",
"start_index",
"is",
"not",
"None",
"else",
"0",
"num_samples",
"=",
"self",
".",
"num_samples",
"if",
"self",
".",
"num_samples",
"is",
"not",
"None",
"else",
"0",
"c_sampler",
"=",
"cde",
".",
"MindrecordSequentialSampler",
"(",
"num_samples",
",",
"start_index",
")",
"c_child_sampler",
"=",
"self",
".",
"parse_child_for_minddataset",
"(",
")",
"c_sampler",
".",
"add_child",
"(",
"c_child_sampler",
")",
"c_sampler",
".",
"set_num_samples",
"(",
"num_samples",
")",
"return",
"c_sampler"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/engine/samplers.py#L620-L628 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/threading.py | python | _Event.wait | (self, timeout=None) | Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out. | Block until the internal flag is true. | [
"Block",
"until",
"the",
"internal",
"flag",
"is",
"true",
"."
] | def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
with self.__cond:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag | [
"def",
"wait",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"with",
"self",
".",
"__cond",
":",
"if",
"not",
"self",
".",
"__flag",
":",
"self",
".",
"__cond",
".",
"wait",
"(",
"timeout",
")",
"return",
"self",
".",
"__flag"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/threading.py#L597-L615 |
||
SpenceKonde/megaTinyCore | 1c4a70b18a149fe6bcb551dfa6db11ca50b8997b | megaavr/tools/libs/intelhex/__init__.py | python | IntelHex.addresses | (self) | return aa | Returns all used addresses in sorted order.
@return list of occupied data addresses in sorted order. | Returns all used addresses in sorted order. | [
"Returns",
"all",
"used",
"addresses",
"in",
"sorted",
"order",
"."
] | def addresses(self):
'''Returns all used addresses in sorted order.
@return list of occupied data addresses in sorted order.
'''
aa = dict_keys(self._buf)
aa.sort()
return aa | [
"def",
"addresses",
"(",
"self",
")",
":",
"aa",
"=",
"dict_keys",
"(",
"self",
".",
"_buf",
")",
"aa",
".",
"sort",
"(",
")",
"return",
"aa"
] | https://github.com/SpenceKonde/megaTinyCore/blob/1c4a70b18a149fe6bcb551dfa6db11ca50b8997b/megaavr/tools/libs/intelhex/__init__.py#L420-L426 |
|
okex/V3-Open-API-SDK | c5abb0db7e2287718e0055e17e57672ce0ec7fd9 | okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/pep425tags.py | python | get_supported | (
versions=None, # type: Optional[List[str]]
noarch=False, # type: bool
platform=None, # type: Optional[str]
impl=None, # type: Optional[str]
abi=None # type: Optional[str]
) | return supported | Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
:param platform: specify the exact platform you want valid
tags for, or None. If None, use the local system platform.
:param impl: specify the exact implementation you want valid
tags for, or None. If None, use the local interpreter impl.
:param abi: specify the exact abi you want valid
tags for, or None. If None, use the local interpreter abi. | Return a list of supported tags for each version specified in
`versions`. | [
"Return",
"a",
"list",
"of",
"supported",
"tags",
"for",
"each",
"version",
"specified",
"in",
"versions",
"."
] | def get_supported(
versions=None, # type: Optional[List[str]]
noarch=False, # type: bool
platform=None, # type: Optional[str]
impl=None, # type: Optional[str]
abi=None # type: Optional[str]
):
# type: (...) -> List[Pep425Tag]
"""Return a list of supported tags for each version specified in
`versions`.
:param versions: a list of string versions, of the form ["33", "32"],
or None. The first version will be assumed to support our ABI.
:param platform: specify the exact platform you want valid
tags for, or None. If None, use the local system platform.
:param impl: specify the exact implementation you want valid
tags for, or None. If None, use the local interpreter impl.
:param abi: specify the exact abi you want valid
tags for, or None. If None, use the local interpreter abi.
"""
supported = []
# Versions must be given with respect to the preference
if versions is None:
version_info = get_impl_version_info()
versions = get_all_minor_versions_as_strings(version_info)
impl = impl or get_abbr_impl()
abis = [] # type: List[str]
abi = abi or get_abi_tag()
if abi:
abis[0:0] = [abi]
abi3s = set()
for suffix in get_extension_suffixes():
if suffix.startswith('.abi'):
abi3s.add(suffix.split('.', 2)[1])
abis.extend(sorted(list(abi3s)))
abis.append('none')
if not noarch:
arch = platform or get_platform()
arch_prefix, arch_sep, arch_suffix = arch.partition('_')
if arch.startswith('macosx'):
# support macosx-10.6-intel on macosx-10.9-x86_64
match = _osx_arch_pat.match(arch)
if match:
name, major, minor, actual_arch = match.groups()
tpl = '{}_{}_%i_%s'.format(name, major)
arches = []
for m in reversed(range(int(minor) + 1)):
for a in get_darwin_arches(int(major), m, actual_arch):
arches.append(tpl % (m, a))
else:
# arch pattern didn't match (?!)
arches = [arch]
elif arch_prefix == 'manylinux2010':
# manylinux1 wheels run on most manylinux2010 systems with the
# exception of wheels depending on ncurses. PEP 571 states
# manylinux1 wheels should be considered manylinux2010 wheels:
# https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
arches = [arch, 'manylinux1' + arch_sep + arch_suffix]
elif platform is None:
arches = []
if is_manylinux2010_compatible():
arches.append('manylinux2010' + arch_sep + arch_suffix)
if is_manylinux1_compatible():
arches.append('manylinux1' + arch_sep + arch_suffix)
arches.append(arch)
else:
arches = [arch]
# Current version, current API (built specifically for our Python):
for abi in abis:
for arch in arches:
supported.append(('%s%s' % (impl, versions[0]), abi, arch))
# abi3 modules compatible with older version of Python
for version in versions[1:]:
# abi3 was introduced in Python 3.2
if version in {'31', '30'}:
break
for abi in abi3s: # empty set if not Python 3
for arch in arches:
supported.append(("%s%s" % (impl, version), abi, arch))
# Has binaries, does not use the Python API:
for arch in arches:
supported.append(('py%s' % (versions[0][0]), 'none', arch))
# No abi / arch, but requires our implementation:
supported.append(('%s%s' % (impl, versions[0]), 'none', 'any'))
# Tagged specifically as being cross-version compatible
# (with just the major version specified)
supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any'))
# No abi / arch, generic Python
for i, version in enumerate(versions):
supported.append(('py%s' % (version,), 'none', 'any'))
if i == 0:
supported.append(('py%s' % (version[0]), 'none', 'any'))
return supported | [
"def",
"get_supported",
"(",
"versions",
"=",
"None",
",",
"# type: Optional[List[str]]",
"noarch",
"=",
"False",
",",
"# type: bool",
"platform",
"=",
"None",
",",
"# type: Optional[str]",
"impl",
"=",
"None",
",",
"# type: Optional[str]",
"abi",
"=",
"None",
"# type: Optional[str]",
")",
":",
"# type: (...) -> List[Pep425Tag]",
"supported",
"=",
"[",
"]",
"# Versions must be given with respect to the preference",
"if",
"versions",
"is",
"None",
":",
"version_info",
"=",
"get_impl_version_info",
"(",
")",
"versions",
"=",
"get_all_minor_versions_as_strings",
"(",
"version_info",
")",
"impl",
"=",
"impl",
"or",
"get_abbr_impl",
"(",
")",
"abis",
"=",
"[",
"]",
"# type: List[str]",
"abi",
"=",
"abi",
"or",
"get_abi_tag",
"(",
")",
"if",
"abi",
":",
"abis",
"[",
"0",
":",
"0",
"]",
"=",
"[",
"abi",
"]",
"abi3s",
"=",
"set",
"(",
")",
"for",
"suffix",
"in",
"get_extension_suffixes",
"(",
")",
":",
"if",
"suffix",
".",
"startswith",
"(",
"'.abi'",
")",
":",
"abi3s",
".",
"add",
"(",
"suffix",
".",
"split",
"(",
"'.'",
",",
"2",
")",
"[",
"1",
"]",
")",
"abis",
".",
"extend",
"(",
"sorted",
"(",
"list",
"(",
"abi3s",
")",
")",
")",
"abis",
".",
"append",
"(",
"'none'",
")",
"if",
"not",
"noarch",
":",
"arch",
"=",
"platform",
"or",
"get_platform",
"(",
")",
"arch_prefix",
",",
"arch_sep",
",",
"arch_suffix",
"=",
"arch",
".",
"partition",
"(",
"'_'",
")",
"if",
"arch",
".",
"startswith",
"(",
"'macosx'",
")",
":",
"# support macosx-10.6-intel on macosx-10.9-x86_64",
"match",
"=",
"_osx_arch_pat",
".",
"match",
"(",
"arch",
")",
"if",
"match",
":",
"name",
",",
"major",
",",
"minor",
",",
"actual_arch",
"=",
"match",
".",
"groups",
"(",
")",
"tpl",
"=",
"'{}_{}_%i_%s'",
".",
"format",
"(",
"name",
",",
"major",
")",
"arches",
"=",
"[",
"]",
"for",
"m",
"in",
"reversed",
"(",
"range",
"(",
"int",
"(",
"minor",
")",
"+",
"1",
")",
")",
":",
"for",
"a",
"in",
"get_darwin_arches",
"(",
"int",
"(",
"major",
")",
",",
"m",
",",
"actual_arch",
")",
":",
"arches",
".",
"append",
"(",
"tpl",
"%",
"(",
"m",
",",
"a",
")",
")",
"else",
":",
"# arch pattern didn't match (?!)",
"arches",
"=",
"[",
"arch",
"]",
"elif",
"arch_prefix",
"==",
"'manylinux2010'",
":",
"# manylinux1 wheels run on most manylinux2010 systems with the",
"# exception of wheels depending on ncurses. PEP 571 states",
"# manylinux1 wheels should be considered manylinux2010 wheels:",
"# https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels",
"arches",
"=",
"[",
"arch",
",",
"'manylinux1'",
"+",
"arch_sep",
"+",
"arch_suffix",
"]",
"elif",
"platform",
"is",
"None",
":",
"arches",
"=",
"[",
"]",
"if",
"is_manylinux2010_compatible",
"(",
")",
":",
"arches",
".",
"append",
"(",
"'manylinux2010'",
"+",
"arch_sep",
"+",
"arch_suffix",
")",
"if",
"is_manylinux1_compatible",
"(",
")",
":",
"arches",
".",
"append",
"(",
"'manylinux1'",
"+",
"arch_sep",
"+",
"arch_suffix",
")",
"arches",
".",
"append",
"(",
"arch",
")",
"else",
":",
"arches",
"=",
"[",
"arch",
"]",
"# Current version, current API (built specifically for our Python):",
"for",
"abi",
"in",
"abis",
":",
"for",
"arch",
"in",
"arches",
":",
"supported",
".",
"append",
"(",
"(",
"'%s%s'",
"%",
"(",
"impl",
",",
"versions",
"[",
"0",
"]",
")",
",",
"abi",
",",
"arch",
")",
")",
"# abi3 modules compatible with older version of Python",
"for",
"version",
"in",
"versions",
"[",
"1",
":",
"]",
":",
"# abi3 was introduced in Python 3.2",
"if",
"version",
"in",
"{",
"'31'",
",",
"'30'",
"}",
":",
"break",
"for",
"abi",
"in",
"abi3s",
":",
"# empty set if not Python 3",
"for",
"arch",
"in",
"arches",
":",
"supported",
".",
"append",
"(",
"(",
"\"%s%s\"",
"%",
"(",
"impl",
",",
"version",
")",
",",
"abi",
",",
"arch",
")",
")",
"# Has binaries, does not use the Python API:",
"for",
"arch",
"in",
"arches",
":",
"supported",
".",
"append",
"(",
"(",
"'py%s'",
"%",
"(",
"versions",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"'none'",
",",
"arch",
")",
")",
"# No abi / arch, but requires our implementation:",
"supported",
".",
"append",
"(",
"(",
"'%s%s'",
"%",
"(",
"impl",
",",
"versions",
"[",
"0",
"]",
")",
",",
"'none'",
",",
"'any'",
")",
")",
"# Tagged specifically as being cross-version compatible",
"# (with just the major version specified)",
"supported",
".",
"append",
"(",
"(",
"'%s%s'",
"%",
"(",
"impl",
",",
"versions",
"[",
"0",
"]",
"[",
"0",
"]",
")",
",",
"'none'",
",",
"'any'",
")",
")",
"# No abi / arch, generic Python",
"for",
"i",
",",
"version",
"in",
"enumerate",
"(",
"versions",
")",
":",
"supported",
".",
"append",
"(",
"(",
"'py%s'",
"%",
"(",
"version",
",",
")",
",",
"'none'",
",",
"'any'",
")",
")",
"if",
"i",
"==",
"0",
":",
"supported",
".",
"append",
"(",
"(",
"'py%s'",
"%",
"(",
"version",
"[",
"0",
"]",
")",
",",
"'none'",
",",
"'any'",
")",
")",
"return",
"supported"
] | https://github.com/okex/V3-Open-API-SDK/blob/c5abb0db7e2287718e0055e17e57672ce0ec7fd9/okex-python-sdk-api/venv/Lib/site-packages/pip-19.0.3-py3.8.egg/pip/_internal/pep425tags.py#L272-L378 |
|
Polidea/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py | python | SBBlock.GetRangeIndexForBlockAddress

```python
def GetRangeIndexForBlockAddress(self, *args):
    """GetRangeIndexForBlockAddress(self, SBAddress block_addr) -> uint32_t"""
    return _lldb.SBBlock_GetRangeIndexForBlockAddress(self, *args)
```

https://github.com/Polidea/SiriusObfuscator/blob/b0e590d8130e97856afe578869b83a209e2b19be/SymbolExtractorAndRenamer/lldb/scripts/Python/static-binding/lldb.py#L1265-L1267

aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/socket.py | python | socket.makefile

```python
def makefile(self, mode="r", buffering=None, *,
             encoding=None, errors=None, newline=None):
    """makefile(...) -> an I/O stream connected to the socket

    The arguments are as for io.open() after the filename, except the only
    supported mode values are 'r' (default), 'w' and 'b'.
    """
    # XXX refactor to share code?
    if not set(mode) <= {"r", "w", "b"}:
        raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
    writing = "w" in mode
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    raw = SocketIO(self, rawmode)
    self._io_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    text.mode = mode
    return text
```

https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/linux_x64/lib/python3.7/socket.py#L221-L261

epam/Indigo | 30e40b4b1eb9bae0207435a26cfcb81ddcc42be1 | api/python/indigo/__init__.py | python | Indigo.getFragmentedMolecule

```python
def getFragmentedMolecule(self, elem, options=""):
    """Returns fragmented molecule for the given composition element

    Args:
        elem (IndigoObject): composition element object
        options (str): Fragmentation options. Optional, defaults to "".

    Returns:
        IndigoObject: fragmented structure object
    """
    if options is None:
        options = ""
    self._setSessionId()
    newobj = self._checkResult(
        Indigo._lib.indigoGetFragmentedMolecule(
            elem.id, options.encode(ENCODE_ENCODING)
        )
    )
    if newobj == 0:
        return None
    else:
        return self.IndigoObject(self, newobj, self)
```

https://github.com/epam/Indigo/blob/30e40b4b1eb9bae0207435a26cfcb81ddcc42be1/api/python/indigo/__init__.py#L6283-L6304

KratosMultiphysics/Kratos | 0000833054ed0503424eb28205d6508d9ca6cbbc | applications/FSIApplication/python_scripts/fsi_coupling_interface.py | python | FSICouplingInterface.SetConvergenceAccelerator

```python
def SetConvergenceAccelerator(self, convergence_accelerator):
    """Set the provided convergence accelerator to the current FSI coupling interface.

    This auxiliary method exists for those situations in which the accelerator's
    constructor requires the FSI coupling interface model part to be set first (e.g. MPI).
    """
    self.convergence_accelerator = convergence_accelerator
```

https://github.com/KratosMultiphysics/Kratos/blob/0000833054ed0503424eb28205d6508d9ca6cbbc/applications/FSIApplication/python_scripts/fsi_coupling_interface.py#L81-L88

mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/dataset/vision/validators.py | python | check_rgb_to_bgr

```python
def check_rgb_to_bgr(method):
    """Wrapper method to check the parameters of rgb_to_bgr."""

    @wraps(method)
    def new_method(self, *args, **kwargs):
        [is_hwc], _ = parse_user_args(method, *args, **kwargs)
        type_check(is_hwc, (bool,), "is_hwc")
        return method(self, *args, **kwargs)

    return new_method
```

https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/dataset/vision/validators.py#L621-L630

aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pyparsing.py | python | ParserElement.transformString

```python
def transformString(self, instring):
    """
    Extension to :class:`scanString`, to modify matching text with modified tokens that may
    be returned from a parse action.  To use ``transformString``, define a grammar and
    attach a parse action to it that modifies the returned token list.
    Invoking ``transformString()`` on a target string will then scan for matches,
    and replace the matched text patterns according to the logic in the parse
    action.  ``transformString()`` returns the resulting transformed string.

    Example::

        wd = Word(alphas)
        wd.setParseAction(lambda toks: toks[0].title())

        print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))

    prints::

        Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
    """
    out = []
    lastE = 0
    # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
    # keep string locs straight between transformString and scanString
    self.keepTabs = True
    try:
        for t, s, e in self.scanString(instring):
            out.append(instring[lastE:s])
            if t:
                if isinstance(t, ParseResults):
                    out += t.asList()
                elif isinstance(t, list):
                    out += t
                else:
                    out.append(t)
            lastE = e
        out.append(instring[lastE:])
        out = [o for o in out if o]
        return "".join(map(_ustr, _flatten(out)))
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clearing out pyparsing internal stack trace
            if getattr(exc, '__traceback__', None) is not None:
                exc.__traceback__ = self._trim_traceback(exc.__traceback__)
            raise exc
```

https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/site-packages/pip/_vendor/pyparsing.py#L2033-L2079

mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderILLEfficiency.py | python | PowderILLEfficiency._process_global

```python
def _process_global(self):
    """
    Performs the global derivation for D2B following the logic:
    1. SumOverlappingTubes with 2D option to obtain the reference
    2. Loop over tubes, make ratios wrt reference, obtain constants
    3. Apply the constants, and iterate over if requested
    """
    data_type = 'Raw'
    if self.getProperty('UseCalibratedData').value:
        data_type = 'Calibrated'
    constants_ws = self._hide('constants')
    response_ws = self._hide('resp')
    calib_ws = self._hide('calib')
    ref_ws = self._hide('ref')
    numors = []
    self._progress = Progress(self, start=0.0, end=1.0, nreports=self._n_scan_files)

    for index, numor in enumerate(self._input_files.split(',')):
        self._progress.report('Pre-processing detector scan ' + numor[-10:-4])
        ws_name = '__raw_' + str(index)
        numors.append(ws_name)
        LoadILLDiffraction(Filename=numor, OutputWorkspace=ws_name, DataType=data_type)
        self._validate_scan(ws_name)
        if index == 0:
            if mtd[ws_name].getInstrument().getName() != 'D2B':
                raise RuntimeError('Global reference method is not supported for the instrument given')
            self._configure_global(ws_name)
        if self._normalise_to == 'Monitor':
            NormaliseToMonitor(InputWorkspace=ws_name, OutputWorkspace=ws_name, MonitorID=0)
        ExtractMonitors(InputWorkspace=ws_name, DetectorWorkspace=ws_name)
        ConvertSpectrumAxis(InputWorkspace=ws_name, OrderAxis=False, Target="SignedTheta", OutputWorkspace=ws_name)
        if self._calib_file:
            ApplyDetectorScanEffCorr(InputWorkspace=ws_name, DetectorEfficiencyWorkspace=calib_ws, OutputWorkspace=ws_name)
        n_scan_steps = mtd[ws_name].getRun().getLogData("ScanSteps").value
        if n_scan_steps != self._n_scans_per_file:
            self.log().warning("Run {0} has {1} scan points instead of {2}.".
                               format(numor[-10:-4], n_scan_steps, self._n_scans_per_file))
            self._crop_last_time_index(ws_name, n_scan_steps)

    if self._calib_file:
        DeleteWorkspace(calib_ws)

    constants = np.ones([self._n_pixels_per_tube, self._n_tubes])
    x = np.arange(self._n_tubes)
    e = np.zeros([self._n_pixels_per_tube, self._n_tubes])
    CreateWorkspace(DataX=np.tile(x, self._n_pixels_per_tube), DataY=constants, DataE=e,
                    NSpec=self._n_pixels_per_tube, OutputWorkspace=constants_ws)
    calib_current = self._hide('current')
    CloneWorkspace(InputWorkspace=constants_ws, OutputWorkspace=calib_current)

    iteration = 0
    chi2_ndof = np.inf  # set a large number to start with
    self._pixels_to_trim = 28
    chi2_ndof_threshold = 1.
    inst = mtd[numors[0]].getInstrument()
    if inst.hasParameter('pixels_to_trim'):
        self._pixels_to_trim = inst.getIntParameter('pixels_to_trim')[0]
    if inst.hasParameter('chi2_ndof'):
        chi2_ndof_threshold = inst.getNumberParameter('chi2_ndof')[0]

    while iteration < self._n_iterations or (self._n_iterations == 0 and chi2_ndof > chi2_ndof_threshold):
        self._progress = Progress(self, start=0.0, end=1.0, nreports=5)
        self._progress.report('Starting iteration #' + str(iteration))
        self._derive_calibration_global(numors)
        Multiply(LHSWorkspace=constants_ws, RHSWorkspace=calib_current, OutputWorkspace=constants_ws)
        chi2_ndof = self._chi_squared(calib_current)
        if iteration != 0:
            self.log().warning('Iteration {0}: Chi2/NdoF={1} (termination criterion: < {2})'.
                               format(iteration, chi2_ndof, chi2_ndof_threshold))
        iteration += 1

    if self._out_response:
        for index in range(self._n_scan_files):
            ws_name = '__raw_' + str(index)
            ApplyDetectorScanEffCorr(InputWorkspace=ws_name, DetectorEfficiencyWorkspace=calib_current, OutputWorkspace=ws_name)
        SumOverlappingTubes(InputWorkspaces=numors, OutputWorkspace=response_ws, MirrorScatteringAngles=False,
                            CropNegativeScatteringAngles=False, Normalise=True, OutputType="2DTubes")

    DeleteWorkspace(ref_ws)
    DeleteWorkspaces(numors)
    DeleteWorkspace(calib_current)
    mtd[constants_ws].getAxis(0).setUnit('Label').setLabel('Tube #', '')
    mtd[constants_ws].getAxis(1).setUnit('Label').setLabel('Pixel #', '')
    mtd[constants_ws].setYUnitLabel('Calibration constant')
```

https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/PowderILLEfficiency.py#L636-L720

wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/richtext.py | python | RichTextBuffer.SetBulletRightMargin

```python
def SetBulletRightMargin(*args, **kwargs):
    """SetBulletRightMargin(int margin)"""
    return _richtext.RichTextBuffer_SetBulletRightMargin(*args, **kwargs)
```

https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/richtext.py#L2620-L2622

aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/charset.py | python | Charset.get_body_encoding

```python
def get_body_encoding(self):
    """Return the content-transfer-encoding used for body encoding.

    This is either the string `quoted-printable' or `base64' depending on
    the encoding used, or it is a function in which case you should call
    the function with a single argument, the Message object being
    encoded.  The function should then set the Content-Transfer-Encoding
    header itself to whatever is appropriate.

    Returns "quoted-printable" if self.body_encoding is QP.
    Returns "base64" if self.body_encoding is BASE64.
    Returns conversion function otherwise.
    """
    assert self.body_encoding != SHORTEST
    if self.body_encoding == QP:
        return 'quoted-printable'
    elif self.body_encoding == BASE64:
        return 'base64'
    else:
        return encode_7or8bit
```

https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/email/charset.py#L252-L271

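Usage with the standard-library `Charset`, which picks the body encoding from the charset name:

```python
from email.charset import Charset

print(Charset('utf-8').get_body_encoding())       # base64
print(Charset('iso-8859-1').get_body_encoding())  # quoted-printable
print(Charset('us-ascii').get_body_encoding())    # a 7/8-bit passthrough function
```
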
sccn/lsl_archived | 2ff44b7a5172b02fe845b1fc72b9ab5578a489ed | LSL/liblsl-Python/pylsl/pylsl.py | python | resolve_bypred

```python
def resolve_bypred(predicate, minimum=1, timeout=FOREVER):
    """Resolve all streams that match a given predicate.

    Advanced query that allows to impose more conditions on the retrieved
    streams; the given string is an XPath 1.0 predicate for the <description>
    node (omitting the surrounding []'s), see also
    http://en.wikipedia.org/w/index.php?title=XPath_1.0&oldid=474981951.

    Keyword arguments:
    predicate -- The predicate string, e.g. "name='BioSemi'" or
                 "type='EEG' and starts-with(name,'BioSemi') and
                 count(description/desc/channels/channel)=32"
    minimum -- Return at least this many streams. (default 1)
    timeout -- Optionally a timeout of the operation, in seconds. If the
               timeout expires, less than the desired number of streams
               (possibly none) will be returned. (default FOREVER)

    Returns a list of matching StreamInfo objects (with empty desc field), any
    of which can subsequently be used to open an inlet.
    """
    # noinspection PyCallingNonCallable
    buffer = (c_void_p * 1024)()
    num_found = lib.lsl_resolve_bypred(byref(buffer), 1024,
                                       c_char_p(str.encode(predicate)),
                                       minimum,
                                       c_double(timeout))
    return [StreamInfo(handle=buffer[k]) for k in range(num_found)]
```

https://github.com/sccn/lsl_archived/blob/2ff44b7a5172b02fe845b1fc72b9ab5578a489ed/LSL/liblsl-Python/pylsl/pylsl.py#L570-L597

widelands/widelands | e9f047d46a23d81312237d52eabf7d74e8de52d6 | doc/sphinx/documentation_enhancements.py | python | add_child_of

```python
def add_child_of(rst_data, outfile):
    """Adds the String 'Child of: …, …' to rst_data."""
    found_classes = RSTDATA_CLS_RE.findall(rst_data)
    for c_name in found_classes:
        cls_inst = classes.get_instance(c_name, outfile)
        parents = classes.get_parent_tree(cls_inst)
        if parents:
            repl_str = '.. class:: {}\n\n'.format(cls_inst.name)
            child_str = '{} Child of:'.format(repl_str)
            for i, parent in enumerate(parents):
                if classes.have_same_source(parent, cls_inst):
                    cls_name = parent.name
                else:
                    # Apply the long name to make sphinx-links work across
                    # documents
                    cls_name = parent.long_name
                child_str += ' :class:`{}`'.format(cls_name)
                if i < len(parents) - 1:
                    # Add separator except after last entry
                    child_str += ', '
            child_str += '\n\n'
            rst_data = rst_data.replace(repl_str, child_str)
    return rst_data
```

https://github.com/widelands/widelands/blob/e9f047d46a23d81312237d52eabf7d74e8de52d6/doc/sphinx/documentation_enhancements.py#L238-L264

DaFuCoding/MTCNN_Caffe | 09c30c3ff391bd9cb6b249c1910afaf147767ab3 | scripts/cpp_lint.py | python | _SetFilters

```python
def _SetFilters(filters):
    """Sets the module's error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "whitespace/indent").
               Each filter should start with + or -; else we die.
    """
    _cpplint_state.SetFilters(filters)
```

https://github.com/DaFuCoding/MTCNN_Caffe/blob/09c30c3ff391bd9cb6b249c1910afaf147767ab3/scripts/cpp_lint.py#L797-L807

mhammond/pywin32 | 44afd86ba8485194df93234639243252deeb40d5 | com/win32comext/ifilter/demo/filterDemo.py | python | FileParser._get_text

```python
def _get_text(self, body_chunks):
    """
    Gets all the text for a particular chunk. We need to keep calling GetText
    until all the segments for this chunk are retrieved.
    """
    while True:
        try:
            body_chunks.append(self.f.GetText())
        except pythoncom.com_error as e:
            # e.args[0] is the HRESULT; exceptions are not indexable on Python 3,
            # so the original py2-era e[0] is replaced here.
            if e.args[0] in [FILTER_E_NO_MORE_TEXT, FILTER_E_NO_TEXT]:
                break
            else:
                raise
```

https://github.com/mhammond/pywin32/blob/44afd86ba8485194df93234639243252deeb40d5/com/win32comext/ifilter/demo/filterDemo.py#L170-L186

ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | third_party/jinja2/environment.py | python | Template.from_code

```python
def from_code(cls, environment, code, globals, uptodate=None):
    """Creates a template object from compiled code and the globals.  This
    is used by the loaders and environment to create a template object.
    """
    namespace = {
        'environment': environment,
        '__file__': code.co_filename
    }
    exec(code, namespace)
    rv = cls._from_namespace(environment, namespace, globals)
    rv._uptodate = uptodate
    return rv
```

https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/third_party/jinja2/environment.py#L909-L920

aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/io/pytables.py | python | Table.ncols

```python
def ncols(self) -> int:
    """ the number of total columns in the values axes """
    return sum(len(a.values) for a in self.values_axes)
```

https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/io/pytables.py#L3280-L3282

hpi-xnor/BMXNet | ed0b201da6667887222b8e4b5f997c4f6b61943d | python/mxnet/image/detection.py | python | DetRandomCropAug._check_satisfy_constraints

```python
def _check_satisfy_constraints(self, label, xmin, ymin, xmax, ymax, width, height):
    """Check if constraints are satisfied"""
    if (xmax - xmin) * (ymax - ymin) < 2:
        return False  # only 1 pixel
    x1 = float(xmin) / width
    y1 = float(ymin) / height
    x2 = float(xmax) / width
    y2 = float(ymax) / height
    object_areas = self._calculate_areas(label[:, 1:])
    valid_objects = np.where(object_areas * width * height > 2)[0]
    if valid_objects.size < 1:
        return False
    intersects = self._intersect(label[valid_objects, 1:], x1, y1, x2, y2)
    coverages = self._calculate_areas(intersects) / object_areas[valid_objects]
    coverages = coverages[np.where(coverages > 0)[0]]
    if coverages.size > 0 and np.amin(coverages) > self.min_object_covered:
        return True
```

https://github.com/hpi-xnor/BMXNet/blob/ed0b201da6667887222b8e4b5f997c4f6b61943d/python/mxnet/image/detection.py#L233-L249

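A standalone NumPy sketch of the coverage test used above: the fraction of each object box that falls inside a candidate crop, with boxes given as `[x1, y1, x2, y2]` in normalized coordinates:

```python
import numpy as np

def coverage(boxes, crop):
    # intersection rectangle of each box with the crop
    ix1 = np.maximum(boxes[:, 0], crop[0])
    iy1 = np.maximum(boxes[:, 1], crop[1])
    ix2 = np.minimum(boxes[:, 2], crop[2])
    iy2 = np.minimum(boxes[:, 3], crop[3])
    inter = np.clip(ix2 - ix1, 0, None) * np.clip(iy2 - iy1, 0, None)
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / areas

boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.6, 0.6, 0.9, 0.9]])
print(coverage(boxes, np.array([0.0, 0.0, 0.55, 0.55])))  # [1. 0.]
```

A crop passes when every surviving object's coverage exceeds `min_object_covered`, exactly as in the method above.
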
mozilla/DeepSpeech | aa1d28530d531d0d92289bf5f11a49fe516fdc86 | data/lm/generate_lm.py | python | convert_and_filter_topk

```python
def convert_and_filter_topk(args):
    """ Convert to lowercase, count word occurrences and save top-k words to a file """

    counter = Counter()
    data_lower = os.path.join(args.output_dir, "lower.txt.gz")

    print("\nConverting to lowercase and counting word occurrences ...")
    with io.TextIOWrapper(
        io.BufferedWriter(gzip.open(data_lower, "w+")), encoding="utf-8"
    ) as file_out:

        # Open the input file either from input.txt or input.txt.gz
        _, file_extension = os.path.splitext(args.input_txt)
        if file_extension == ".gz":
            file_in = io.TextIOWrapper(
                io.BufferedReader(gzip.open(args.input_txt)), encoding="utf-8"
            )
        else:
            file_in = open(args.input_txt, encoding="utf-8")

        for line in progressbar.progressbar(file_in):
            line_lower = line.lower()
            counter.update(line_lower.split())
            file_out.write(line_lower)

        file_in.close()

    # Save top-k words
    print("\nSaving top {} words ...".format(args.top_k))
    top_counter = counter.most_common(args.top_k)
    vocab_str = "\n".join(word for word, count in top_counter)
    vocab_path = "vocab-{}.txt".format(args.top_k)
    vocab_path = os.path.join(args.output_dir, vocab_path)
    with open(vocab_path, "w+") as file:
        file.write(vocab_str)

    print("\nCalculating word statistics ...")
    total_words = sum(counter.values())
    print("  Your text file has {} words in total".format(total_words))
    print("  It has {} unique words".format(len(counter)))
    top_words_sum = sum(count for word, count in top_counter)
    word_fraction = (top_words_sum / total_words) * 100
    print(
        "  Your top-{} words are {:.4f} percent of all words".format(
            args.top_k, word_fraction
        )
    )
    print('  Your most common word "{}" occurred {} times'.format(*top_counter[0]))
    last_word, last_count = top_counter[-1]
    print(
        '  The least common word in your top-k is "{}" with {} times'.format(
            last_word, last_count
        )
    )
    for i, (w, c) in enumerate(reversed(top_counter)):
        if c > last_count:
            print(
                '  The first word with {} occurrences is "{}" at place {}'.format(
                    c, w, len(top_counter) - 1 - i
                )
            )
            break

    return data_lower, vocab_str
```

https://github.com/mozilla/DeepSpeech/blob/aa1d28530d531d0d92289bf5f11a49fe516fdc86/data/lm/generate_lm.py#L11-L74

wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_core.py | python | ItemContainer.Insert

```python
def Insert(*args, **kwargs):
    """
    Insert(self, String item, int pos, PyObject clientData=None) -> int

    Insert an item into the control before the item at the ``pos`` index,
    optionally associating some data object with the item.
    """
    return _core_.ItemContainer_Insert(*args, **kwargs)
```

https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_core.py#L12876-L12883

cinder/Cinder | e83f5bb9c01a63eec20168d02953a0879e5100f7 | docs/generateDocs.py | python | update_link

```python
def update_link(link, in_path, out_path):
    """
    Update the given link to point to something relative to the new path
    :param link: The link to change
    :param in_path: the original path to the file that the link lives in
    :return:
    """
    if link.startswith("http") or link.startswith("javascript:") or link.startswith("#"):
        return link

    SEPARATOR = '/'
    in_path = in_path.replace('\\', SEPARATOR)
    out_path = out_path.replace('\\', SEPARATOR)
    link = link.replace('\\', SEPARATOR)
    base_path = BASE_PATH.replace('\\', SEPARATOR)

    # if a relative path, make it absolute
    if in_path.find(base_path) < 0:
        in_path = base_path + in_path

    # get absolute in path
    abs_link_path = update_link_abs(link, in_path)

    # convert to relative link in relation to the out path
    src_base = in_path.split(base_path)[1].split(SEPARATOR)[0]    # likely htmlsrc
    dest_base = out_path.split(base_path)[1].split(SEPARATOR)[0]  # htmlsrc or html
    abs_dest = posixpath.dirname(out_path).replace('\\', SEPARATOR)
    abs_link = abs_link_path.replace(src_base, dest_base)
    # if not posixpath.isabs(abs_link):
    #     abs_link = "/" + abs_link
    rel_link_path = relative_url(abs_dest, abs_link)
    return rel_link_path
```

https://github.com/cinder/Cinder/blob/e83f5bb9c01a63eec20168d02953a0879e5100f7/docs/generateDocs.py#L3046-L3081

miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/tensorboard/lib/python/json_util.py | python | WrapSpecialFloats

```python
def WrapSpecialFloats(obj):
    """Replaces all instances of Infinity/-Infinity/NaN with strings."""
    if obj == float('inf'):
        return 'Infinity'
    elif obj == float('-inf'):
        return '-Infinity'
    elif isinstance(obj, float) and math.isnan(obj):
        return 'NaN'
    elif isinstance(obj, list) or isinstance(obj, tuple):
        return list(map(WrapSpecialFloats, obj))
    elif isinstance(obj, dict):
        return {
            WrapSpecialFloats(k): WrapSpecialFloats(v)
            for k, v in obj.items()
        }
    else:
        return obj
```

https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/tensorboard/lib/python/json_util.py#L33-L49

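Usage: make a training-metrics dict JSON-safe before serializing. This uses the `WrapSpecialFloats` defined above (whose module imports `math`):

```python
import json

metrics = {'loss': float('nan'), 'lr': 0.01, 'grad_norm': float('inf')}
print(json.dumps(WrapSpecialFloats(metrics)))
# {"loss": "NaN", "lr": 0.01, "grad_norm": "Infinity"}
```
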
SoarGroup/Soar | a1c5e249499137a27da60533c72969eef3b8ab6b | scons/scons-local-4.1.0/SCons/Node/FS.py | python | File.get_contents_sig

```python
def get_contents_sig(self):
    """
    A helper method for get_cachedir_bsig.

    It computes and returns the signature for this
    node's contents.
    """
    try:
        return self.contentsig
    except AttributeError:
        pass

    executor = self.get_executor()

    result = self.contentsig = MD5signature(executor.get_contents())
    return result
```

https://github.com/SoarGroup/Soar/blob/a1c5e249499137a27da60533c72969eef3b8ab6b/scons/scons-local-4.1.0/SCons/Node/FS.py#L3620-L3636

TheLegendAli/DeepLab-Context | fb04e9e2fc2682490ad9f60533b9d6c4c0e0479c | scripts/cpp_lint.py | python | CheckLanguage | (filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error) | Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found. | Checks rules from the 'C++ language rules' section of cppguide.html. | [
"Checks",
"rules",
"from",
"the",
"C",
"++",
"language",
"rules",
"section",
"of",
"cppguide",
".",
"html",
"."
] | def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check to see if they're using an conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple lines to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.') | [
"def",
"CheckLanguage",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"file_extension",
",",
"include_state",
",",
"nesting_state",
",",
"error",
")",
":",
"# If the line is empty or consists of entirely a comment, no need to",
"# check it.",
"line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"]",
"if",
"not",
"line",
":",
"return",
"match",
"=",
"_RE_PATTERN_INCLUDE",
".",
"search",
"(",
"line",
")",
"if",
"match",
":",
"CheckIncludeLine",
"(",
"filename",
",",
"clean_lines",
",",
"linenum",
",",
"include_state",
",",
"error",
")",
"return",
"# Reset include state across preprocessor directives. This is meant",
"# to silence warnings for conditional includes.",
"if",
"Match",
"(",
"r'^\\s*#\\s*(?:ifdef|elif|else|endif)\\b'",
",",
"line",
")",
":",
"include_state",
".",
"ResetSection",
"(",
")",
"# Make Windows paths like Unix.",
"fullname",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"# TODO(unknown): figure out if they're using default arguments in fn proto.",
"# Check to see if they're using an conversion function cast.",
"# I just try to capture the most common basic types, though there are more.",
"# Parameterless conversion functions, such as bool(), are allowed as they are",
"# probably a member operator declaration or default constructor.",
"match",
"=",
"Search",
"(",
"r'(\\bnew\\s+)?\\b'",
"# Grab 'new' operator, if it's there",
"r'(int|float|double|bool|char|int32|uint32|int64|uint64)'",
"r'(\\([^)].*)'",
",",
"line",
")",
"if",
"match",
":",
"matched_new",
"=",
"match",
".",
"group",
"(",
"1",
")",
"matched_type",
"=",
"match",
".",
"group",
"(",
"2",
")",
"matched_funcptr",
"=",
"match",
".",
"group",
"(",
"3",
")",
"# gMock methods are defined using some variant of MOCK_METHODx(name, type)",
"# where type may be float(), int(string), etc. Without context they are",
"# virtually indistinguishable from int(x) casts. Likewise, gMock's",
"# MockCallback takes a template parameter of the form return_type(arg_type),",
"# which looks much like the cast we're trying to detect.",
"#",
"# std::function<> wrapper has a similar problem.",
"#",
"# Return types for function pointers also look like casts if they",
"# don't have an extra space.",
"if",
"(",
"matched_new",
"is",
"None",
"and",
"# If new operator, then this isn't a cast",
"not",
"(",
"Match",
"(",
"r'^\\s*MOCK_(CONST_)?METHOD\\d+(_T)?\\('",
",",
"line",
")",
"or",
"Search",
"(",
"r'\\bMockCallback<.*>'",
",",
"line",
")",
"or",
"Search",
"(",
"r'\\bstd::function<.*>'",
",",
"line",
")",
")",
"and",
"not",
"(",
"matched_funcptr",
"and",
"Match",
"(",
"r'\\((?:[^() ]+::\\s*\\*\\s*)?[^() ]+\\)\\s*\\('",
",",
"matched_funcptr",
")",
")",
")",
":",
"# Try a bit harder to catch gmock lines: the only place where",
"# something looks like an old-style cast is where we declare the",
"# return type of the mocked method, and the only time when we",
"# are missing context is if MOCK_METHOD was split across",
"# multiple lines. The missing MOCK_METHOD is usually one or two",
"# lines back, so scan back one or two lines.",
"#",
"# It's not possible for gmock macros to appear in the first 2",
"# lines, since the class head + section name takes up 2 lines.",
"if",
"(",
"linenum",
"<",
"2",
"or",
"not",
"(",
"Match",
"(",
"r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\((?:\\S+,)?\\s*$'",
",",
"clean_lines",
".",
"elided",
"[",
"linenum",
"-",
"1",
"]",
")",
"or",
"Match",
"(",
"r'^\\s*MOCK_(?:CONST_)?METHOD\\d+(?:_T)?\\(\\s*$'",
",",
"clean_lines",
".",
"elided",
"[",
"linenum",
"-",
"2",
"]",
")",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/casting'",
",",
"4",
",",
"'Using deprecated casting style. '",
"'Use static_cast<%s>(...) instead'",
"%",
"matched_type",
")",
"CheckCStyleCast",
"(",
"filename",
",",
"linenum",
",",
"line",
",",
"clean_lines",
".",
"raw_lines",
"[",
"linenum",
"]",
",",
"'static_cast'",
",",
"r'\\((int|float|double|bool|char|u?int(16|32|64))\\)'",
",",
"error",
")",
"# This doesn't catch all cases. Consider (const char * const)\"hello\".",
"#",
"# (char *) \"foo\" should always be a const_cast (reinterpret_cast won't",
"# compile).",
"if",
"CheckCStyleCast",
"(",
"filename",
",",
"linenum",
",",
"line",
",",
"clean_lines",
".",
"raw_lines",
"[",
"linenum",
"]",
",",
"'const_cast'",
",",
"r'\\((char\\s?\\*+\\s?)\\)\\s*\"'",
",",
"error",
")",
":",
"pass",
"else",
":",
"# Check pointer casts for other than string constants",
"CheckCStyleCast",
"(",
"filename",
",",
"linenum",
",",
"line",
",",
"clean_lines",
".",
"raw_lines",
"[",
"linenum",
"]",
",",
"'reinterpret_cast'",
",",
"r'\\((\\w+\\s?\\*+\\s?)\\)'",
",",
"error",
")",
"# In addition, we look for people taking the address of a cast. This",
"# is dangerous -- casts can assign to temporaries, so the pointer doesn't",
"# point where you think.",
"match",
"=",
"Search",
"(",
"r'(?:&\\(([^)]+)\\)[\\w(])|'",
"r'(?:&(static|dynamic|down|reinterpret)_cast\\b)'",
",",
"line",
")",
"if",
"match",
"and",
"match",
".",
"group",
"(",
"1",
")",
"!=",
"'*'",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/casting'",
",",
"4",
",",
"(",
"'Are you taking an address of a cast? '",
"'This is dangerous: could be a temp var. '",
"'Take the address before doing the cast, rather than after'",
")",
")",
"# Create an extended_line, which is the concatenation of the current and",
"# next lines, for more effective checking of code that may span more than one",
"# line.",
"if",
"linenum",
"+",
"1",
"<",
"clean_lines",
".",
"NumLines",
"(",
")",
":",
"extended_line",
"=",
"line",
"+",
"clean_lines",
".",
"elided",
"[",
"linenum",
"+",
"1",
"]",
"else",
":",
"extended_line",
"=",
"line",
"# Check for people declaring static/global STL strings at the top level.",
"# This is dangerous because the C++ language does not guarantee that",
"# globals with constructors are initialized before the first access.",
"match",
"=",
"Match",
"(",
"r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\\b(.*)'",
",",
"line",
")",
"# Make sure it's not a function.",
"# Function template specialization looks like: \"string foo<Type>(...\".",
"# Class template definitions look like: \"string Foo<Type>::Method(...\".",
"#",
"# Also ignore things that look like operators. These are matched separately",
"# because operator names cross non-word boundaries. If we change the pattern",
"# above, we would decrease the accuracy of matching identifiers.",
"if",
"(",
"match",
"and",
"not",
"Search",
"(",
"r'\\boperator\\W'",
",",
"line",
")",
"and",
"not",
"Match",
"(",
"r'\\s*(<.*>)?(::[a-zA-Z0-9_]+)?\\s*\\(([^\"]|$)'",
",",
"match",
".",
"group",
"(",
"3",
")",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/string'",
",",
"4",
",",
"'For a static/global string constant, use a C style string instead: '",
"'\"%schar %s[]\".'",
"%",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"if",
"Search",
"(",
"r'\\b([A-Za-z0-9_]*_)\\(\\1\\)'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/init'",
",",
"4",
",",
"'You seem to be initializing a member variable with itself.'",
")",
"if",
"file_extension",
"==",
"'h'",
":",
"# TODO(unknown): check that 1-arg constructors are explicit.",
"# How to tell it's a constructor?",
"# (handled in CheckForNonStandardConstructs for now)",
"# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS",
"# (level 1 error)",
"pass",
"# Check if people are using the verboten C basic types. The only exception",
"# we regularly allow is \"unsigned short port\" for port.",
"if",
"Search",
"(",
"r'\\bshort port\\b'",
",",
"line",
")",
":",
"if",
"not",
"Search",
"(",
"r'\\bunsigned short port\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/int'",
",",
"4",
",",
"'Use \"unsigned short\" for ports, not \"short\"'",
")",
"else",
":",
"match",
"=",
"Search",
"(",
"r'\\b(short|long(?! +double)|long long)\\b'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/int'",
",",
"4",
",",
"'Use int16/int64/etc, rather than the C type %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"# When snprintf is used, the second argument shouldn't be a literal.",
"match",
"=",
"Search",
"(",
"r'snprintf\\s*\\(([^,]*),\\s*([0-9]*)\\s*,'",
",",
"line",
")",
"if",
"match",
"and",
"match",
".",
"group",
"(",
"2",
")",
"!=",
"'0'",
":",
"# If 2nd arg is zero, snprintf is used to calculate size.",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"3",
",",
"'If you can, use sizeof(%s) instead of %s as the 2nd arg '",
"'to snprintf.'",
"%",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"# Check if some verboten C functions are being used.",
"if",
"Search",
"(",
"r'\\bsprintf\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"5",
",",
"'Never use sprintf. Use snprintf instead.'",
")",
"match",
"=",
"Search",
"(",
"r'\\b(strcpy|strcat)\\b'",
",",
"line",
")",
"if",
"match",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"4",
",",
"'Almost always, snprintf is better than %s'",
"%",
"match",
".",
"group",
"(",
"1",
")",
")",
"# Check if some verboten operator overloading is going on",
"# TODO(unknown): catch out-of-line unary operator&:",
"# class X {};",
"# int operator&(const X& x) { return 42; } // unary operator&",
"# The trick is it's hard to tell apart from binary operator&:",
"# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&",
"if",
"Search",
"(",
"r'\\boperator\\s*&\\s*\\(\\s*\\)'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/operator'",
",",
"4",
",",
"'Unary operator& is dangerous. Do not use it.'",
")",
"# Check for suspicious usage of \"if\" like",
"# } if (a == b) {",
"if",
"Search",
"(",
"r'\\}\\s*if\\s*\\('",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/braces'",
",",
"4",
",",
"'Did you mean \"else if\"? If not, start a new line for \"if\".'",
")",
"# Check for potential format string bugs like printf(foo).",
"# We constrain the pattern not to pick things like DocidForPrintf(foo).",
"# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())",
"# TODO(sugawarayu): Catch the following case. Need to change the calling",
"# convention of the whole function to process multiple line to handle it.",
"# printf(",
"# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);",
"printf_args",
"=",
"_GetTextInside",
"(",
"line",
",",
"r'(?i)\\b(string)?printf\\s*\\('",
")",
"if",
"printf_args",
":",
"match",
"=",
"Match",
"(",
"r'([\\w.\\->()]+)$'",
",",
"printf_args",
")",
"if",
"match",
"and",
"match",
".",
"group",
"(",
"1",
")",
"!=",
"'__VA_ARGS__'",
":",
"function_name",
"=",
"re",
".",
"search",
"(",
"r'\\b((?:string)?printf)\\s*\\('",
",",
"line",
",",
"re",
".",
"I",
")",
".",
"group",
"(",
"1",
")",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/printf'",
",",
"4",
",",
"'Potential format string bug. Do %s(\"%%s\", %s) instead.'",
"%",
"(",
"function_name",
",",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"# Check for potential memset bugs like memset(buf, sizeof(buf), 0).",
"match",
"=",
"Search",
"(",
"r'memset\\s*\\(([^,]*),\\s*([^,]*),\\s*0\\s*\\)'",
",",
"line",
")",
"if",
"match",
"and",
"not",
"Match",
"(",
"r\"^''|-?[0-9]+|0x[0-9A-Fa-f]$\"",
",",
"match",
".",
"group",
"(",
"2",
")",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/memset'",
",",
"4",
",",
"'Did you mean \"memset(%s, 0, %s)\"?'",
"%",
"(",
"match",
".",
"group",
"(",
"1",
")",
",",
"match",
".",
"group",
"(",
"2",
")",
")",
")",
"if",
"Search",
"(",
"r'\\busing namespace\\b'",
",",
"line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/namespaces'",
",",
"5",
",",
"'Do not use namespace using-directives. '",
"'Use using-declarations instead.'",
")",
"# Detect variable-length arrays.",
"match",
"=",
"Match",
"(",
"r'\\s*(.+::)?(\\w+) [a-z]\\w*\\[(.+)];'",
",",
"line",
")",
"if",
"(",
"match",
"and",
"match",
".",
"group",
"(",
"2",
")",
"!=",
"'return'",
"and",
"match",
".",
"group",
"(",
"2",
")",
"!=",
"'delete'",
"and",
"match",
".",
"group",
"(",
"3",
")",
".",
"find",
"(",
"']'",
")",
"==",
"-",
"1",
")",
":",
"# Split the size using space and arithmetic operators as delimiters.",
"# If any of the resulting tokens are not compile time constants then",
"# report the error.",
"tokens",
"=",
"re",
".",
"split",
"(",
"r'\\s|\\+|\\-|\\*|\\/|<<|>>]'",
",",
"match",
".",
"group",
"(",
"3",
")",
")",
"is_const",
"=",
"True",
"skip_next",
"=",
"False",
"for",
"tok",
"in",
"tokens",
":",
"if",
"skip_next",
":",
"skip_next",
"=",
"False",
"continue",
"if",
"Search",
"(",
"r'sizeof\\(.+\\)'",
",",
"tok",
")",
":",
"continue",
"if",
"Search",
"(",
"r'arraysize\\(\\w+\\)'",
",",
"tok",
")",
":",
"continue",
"tok",
"=",
"tok",
".",
"lstrip",
"(",
"'('",
")",
"tok",
"=",
"tok",
".",
"rstrip",
"(",
"')'",
")",
"if",
"not",
"tok",
":",
"continue",
"if",
"Match",
"(",
"r'\\d+'",
",",
"tok",
")",
":",
"continue",
"if",
"Match",
"(",
"r'0[xX][0-9a-fA-F]+'",
",",
"tok",
")",
":",
"continue",
"if",
"Match",
"(",
"r'k[A-Z0-9]\\w*'",
",",
"tok",
")",
":",
"continue",
"if",
"Match",
"(",
"r'(.+::)?k[A-Z0-9]\\w*'",
",",
"tok",
")",
":",
"continue",
"if",
"Match",
"(",
"r'(.+::)?[A-Z][A-Z0-9_]*'",
",",
"tok",
")",
":",
"continue",
"# A catch all for tricky sizeof cases, including 'sizeof expression',",
"# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'",
"# requires skipping the next token because we split on ' ' and '*'.",
"if",
"tok",
".",
"startswith",
"(",
"'sizeof'",
")",
":",
"skip_next",
"=",
"True",
"continue",
"is_const",
"=",
"False",
"break",
"if",
"not",
"is_const",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'runtime/arrays'",
",",
"1",
",",
"'Do not use variable-length arrays. Use an appropriately named '",
"\"('k' followed by CamelCase) compile-time constant for the size.\"",
")",
"# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or",
"# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing",
"# in the class declaration.",
"match",
"=",
"Match",
"(",
"(",
"r'\\s*'",
"r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'",
"r'\\(.*\\);$'",
")",
",",
"line",
")",
"if",
"match",
"and",
"linenum",
"+",
"1",
"<",
"clean_lines",
".",
"NumLines",
"(",
")",
":",
"next_line",
"=",
"clean_lines",
".",
"elided",
"[",
"linenum",
"+",
"1",
"]",
"# We allow some, but not all, declarations of variables to be present",
"# in the statement that defines the class. The [\\w\\*,\\s]* fragment of",
"# the regular expression below allows users to declare instances of",
"# the class or pointers to instances, but not less common types such",
"# as function pointers or arrays. It's a tradeoff between allowing",
"# reasonable code and avoiding trying to parse more C++ using regexps.",
"if",
"not",
"Search",
"(",
"r'^\\s*}[\\w\\*,\\s]*;'",
",",
"next_line",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/constructors'",
",",
"3",
",",
"match",
".",
"group",
"(",
"1",
")",
"+",
"' should be the last thing in the class'",
")",
"# Check for use of unnamed namespaces in header files. Registration",
"# macros are typically OK, so we allow use of \"namespace {\" on lines",
"# that end with backslashes.",
"if",
"(",
"file_extension",
"==",
"'h'",
"and",
"Search",
"(",
"r'\\bnamespace\\s*{'",
",",
"line",
")",
"and",
"line",
"[",
"-",
"1",
"]",
"!=",
"'\\\\'",
")",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'build/namespaces'",
",",
"4",
",",
"'Do not use unnamed namespaces in header files. See '",
"'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'",
"' for more information.'",
")"
] | https://github.com/TheLegendAli/DeepLab-Context/blob/fb04e9e2fc2682490ad9f60533b9d6c4c0e0479c/scripts/cpp_lint.py#L3834-L4132 |
||
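CheckLanguage never prints; every finding flows through the error callback, whose (filename, linenum, category, confidence, message) signature is the cpplint convention. A hypothetical collector showing that calling convention:

findings = []

def collect_error(filename, linenum, category, confidence, message):
    # Same signature the checks above invoke, e.g. the sprintf rule:
    findings.append((filename, linenum, category, confidence, message))

collect_error('foo.cc', 42, 'runtime/printf', 5,
              'Never use sprintf. Use snprintf instead.')
assert findings[0][2] == 'runtime/printf'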
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/ipaddress.py | python | IPv6Address.sixtofour | (self) | return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) | Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address. | Return the IPv4 6to4 embedded address. | [
"Return",
"the",
"IPv4",
"6to4",
"embedded",
"address",
"."
] | def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF) | [
"def",
"sixtofour",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"_ip",
">>",
"112",
")",
"!=",
"0x2002",
":",
"return",
"None",
"return",
"IPv4Address",
"(",
"(",
"self",
".",
"_ip",
">>",
"80",
")",
"&",
"0xFFFFFFFF",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/ipaddress.py#L2038-L2048 |
|
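A 2002::/16 address carries the embedded IPv4 address in bits 80..111 of the integer form, which is exactly what the shift-and-mask above extracts. In the shipped ipaddress module this accessor is a read-only property, so no parentheses:

import ipaddress

addr = ipaddress.IPv6Address('2002:c000:0204::')    # 0xc0000204 == 192.0.2.4
assert addr.sixtofour == ipaddress.IPv4Address('192.0.2.4')
assert ipaddress.IPv6Address('2001:db8::1').sixtofour is None   # not under 2002::/16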
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/xml/sax/xmlreader.py | python | AttributesNSImpl.__init__ | (self, attrs, qnames) | NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}. | NS-aware implementation. | [
"NS",
"-",
"aware",
"implementation",
"."
] | def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames | [
"def",
"__init__",
"(",
"self",
",",
"attrs",
",",
"qnames",
")",
":",
"self",
".",
"_attrs",
"=",
"attrs",
"self",
".",
"_qnames",
"=",
"qnames"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/xml/sax/xmlreader.py#L342-L348 |
||
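Building the attribute object by hand follows the {(ns_uri, lname): ...} convention from the docstring; the namespace URI and values below are illustrative:

from xml.sax.xmlreader import AttributesNSImpl

ns = 'http://example.com/ns'
attrs = AttributesNSImpl({(ns, 'id'): '42'}, {(ns, 'id'): 'ex:id'})
assert attrs.getValue((ns, 'id')) == '42'            # lookup by (uri, localname)
assert attrs.getQNameByName((ns, 'id')) == 'ex:id'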
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_gdi.py | python | PseudoDC.DrawRoundedRectangle | (*args, **kwargs) | return _gdi_.PseudoDC_DrawRoundedRectangle(*args, **kwargs) | DrawRoundedRectangle(self, int x, int y, int width, int height, double radius)
Draws a rectangle with the given top left corner, and with the given
size. The corners are quarter-circles using the given radius. The
current pen is used for the outline and the current brush for filling
the shape.
If radius is positive, the value is assumed to be the radius of the
rounded corner. If radius is negative, the absolute value is assumed
to be the proportion of the smallest dimension of the rectangle. This
means that the corner can be a sensible size relative to the size of
the rectangle, and also avoids the strange effects X produces when the
corners are too big for the rectangle. | DrawRoundedRectangle(self, int x, int y, int width, int height, double radius) | [
"DrawRoundedRectangle",
"(",
"self",
"int",
"x",
"int",
"y",
"int",
"width",
"int",
"height",
"double",
"radius",
")"
] | def DrawRoundedRectangle(*args, **kwargs):
"""
DrawRoundedRectangle(self, int x, int y, int width, int height, double radius)
Draws a rectangle with the given top left corner, and with the given
size. The corners are quarter-circles using the given radius. The
current pen is used for the outline and the current brush for filling
the shape.
If radius is positive, the value is assumed to be the radius of the
rounded corner. If radius is negative, the absolute value is assumed
to be the proportion of the smallest dimension of the rectangle. This
means that the corner can be a sensible size relative to the size of
the rectangle, and also avoids the strange effects X produces when the
corners are too big for the rectangle.
"""
return _gdi_.PseudoDC_DrawRoundedRectangle(*args, **kwargs) | [
"def",
"DrawRoundedRectangle",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"PseudoDC_DrawRoundedRectangle",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_gdi.py#L7942-L7958 |
|
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Source/bindings/scripts/idl_reader.py | python | validate_blink_idl_definitions | (idl_filename, idl_file_basename,
definitions) | Validate file contents with filename convention.
The Blink IDL conventions are:
- If an IDL file defines an interface, a dictionary, or an exception,
the IDL file must contain exactly one definition. The definition
name must agree with the file's basename, unless it is a partial
definition. (e.g., 'partial interface Foo' can be in FooBar.idl).
- An IDL file can contain typedefs and enums without having other
definitions. There is no filename convention in this case.
- Otherwise, an IDL file is invalid. | Validate file contents with filename convention. | [
"Validate",
"file",
"contents",
"with",
"filename",
"convention",
"."
] | def validate_blink_idl_definitions(idl_filename, idl_file_basename,
definitions):
"""Validate file contents with filename convention.
The Blink IDL conventions are:
- If an IDL file defines an interface, a dictionary, or an exception,
the IDL file must contain exactly one definition. The definition
name must agree with the file's basename, unless it is a partial
definition. (e.g., 'partial interface Foo' can be in FooBar.idl).
- An IDL file can contain typedefs and enums without having other
definitions. There is no filename convention in this case.
- Otherwise, an IDL file is invalid.
"""
targets = (definitions.interfaces.values() +
definitions.dictionaries.values())
number_of_targets = len(targets)
if number_of_targets > 1:
raise Exception(
'Expected exactly 1 definition in file {0}, but found {1}'
.format(idl_filename, number_of_targets))
if number_of_targets == 0:
if not (definitions.enumerations or definitions.typedefs):
raise Exception(
'No definition found in %s' % idl_filename)
return
target = targets[0]
if not target.is_partial and target.name != idl_file_basename:
raise Exception(
'Definition name "{0}" disagrees with IDL file basename "{1}".'
.format(target.name, idl_file_basename)) | [
"def",
"validate_blink_idl_definitions",
"(",
"idl_filename",
",",
"idl_file_basename",
",",
"definitions",
")",
":",
"targets",
"=",
"(",
"definitions",
".",
"interfaces",
".",
"values",
"(",
")",
"+",
"definitions",
".",
"dictionaries",
".",
"values",
"(",
")",
")",
"number_of_targets",
"=",
"len",
"(",
"targets",
")",
"if",
"number_of_targets",
">",
"1",
":",
"raise",
"Exception",
"(",
"'Expected exactly 1 definition in file {0}, but found {1}'",
".",
"format",
"(",
"idl_filename",
",",
"number_of_targets",
")",
")",
"if",
"number_of_targets",
"==",
"0",
":",
"if",
"not",
"(",
"definitions",
".",
"enumerations",
"or",
"definitions",
".",
"typedefs",
")",
":",
"raise",
"Exception",
"(",
"'No definition found in %s'",
"%",
"idl_filename",
")",
"return",
"target",
"=",
"targets",
"[",
"0",
"]",
"if",
"not",
"target",
".",
"is_partial",
"and",
"target",
".",
"name",
"!=",
"idl_file_basename",
":",
"raise",
"Exception",
"(",
"'Definition name \"{0}\" disagrees with IDL file basename \"{1}\".'",
".",
"format",
"(",
"target",
".",
"name",
",",
"idl_file_basename",
")",
")"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Source/bindings/scripts/idl_reader.py#L45-L74 |
||
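A Python 2 sketch of the filename rule (the .values() concatenation above relies on dict.values() returning lists), with a hand-rolled stub standing in for the parser's definitions object:

class Stub(object):
    pass

iface = Stub()
iface.name = 'Foo'
iface.is_partial = False
defs = Stub()
defs.interfaces = {'Foo': iface}
defs.dictionaries = {}
defs.enumerations = {}
defs.typedefs = {}
validate_blink_idl_definitions('Foo.idl', 'Foo', defs)   # one matching definition: OK
iface.name = 'Bar'
try:
    validate_blink_idl_definitions('Foo.idl', 'Foo', defs)
except Exception as exc:
    print(exc)   # Definition name "Bar" disagrees with IDL file basename "Foo".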
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_controls.py | python | ListCtrl.SetItemTextColour | (*args, **kwargs) | return _controls_.ListCtrl_SetItemTextColour(*args, **kwargs) | SetItemTextColour(self, long item, Colour col) | SetItemTextColour(self, long item, Colour col) | [
"SetItemTextColour",
"(",
"self",
"long",
"item",
"Colour",
"col",
")"
] | def SetItemTextColour(*args, **kwargs):
"""SetItemTextColour(self, long item, Colour col)"""
return _controls_.ListCtrl_SetItemTextColour(*args, **kwargs) | [
"def",
"SetItemTextColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"ListCtrl_SetItemTextColour",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_controls.py#L4736-L4738 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/scipy/scipy/cluster/hierarchy.py | python | ClusterNode.get_left | (self) | return self.left | Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned. | Return a reference to the left child tree object. | [
"Return",
"a",
"reference",
"to",
"the",
"left",
"child",
"tree",
"object",
"."
] | def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left | [
"def",
"get_left",
"(",
"self",
")",
":",
"return",
"self",
".",
"left"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/scipy/scipy/cluster/hierarchy.py#L771-L782 |
|
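get_left and get_right come into play after converting a linkage matrix into a ClusterNode tree with to_tree:

import numpy as np
from scipy.cluster.hierarchy import linkage, to_tree

pts = np.array([[0.0], [0.1], [5.0], [5.1]])      # two well-separated pairs
root = to_tree(linkage(pts, method='single'))
left = root.get_left()                            # one of the two merged subclusters
print(left.get_count(), root.get_count())         # e.g. "2 4"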
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/lib2to3/pytree.py | python | LeafPattern.__init__ | (self, type=None, content=None, name=None) | Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key. | Initializer. Takes optional type, content, and name. | [
"Initializer",
".",
"Takes",
"optional",
"type",
"content",
"and",
"name",
"."
] | def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name | [
"def",
"__init__",
"(",
"self",
",",
"type",
"=",
"None",
",",
"content",
"=",
"None",
",",
"name",
"=",
"None",
")",
":",
"if",
"type",
"is",
"not",
"None",
":",
"assert",
"0",
"<=",
"type",
"<",
"256",
",",
"type",
"if",
"content",
"is",
"not",
"None",
":",
"assert",
"isinstance",
"(",
"content",
",",
"str",
")",
",",
"repr",
"(",
"content",
")",
"self",
".",
"type",
"=",
"type",
"self",
".",
"content",
"=",
"content",
"self",
".",
"name",
"=",
"name"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/mac/Python.framework/Versions/3.7/lib/python3.7/lib2to3/pytree.py#L502-L520 |
||
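A small match against a hand-built token, using only pytree/pgen2 names; lib2to3 is deprecated in recent CPython releases, so treat this as a sketch:

from lib2to3.pytree import Leaf, LeafPattern
from lib2to3.pgen2 import token

pat = LeafPattern(type=token.NUMBER, name='num')   # match any NUMBER leaf, capture it
results = {}
if pat.match(Leaf(token.NUMBER, '42'), results):
    print(results['num'])                           # Leaf(2, '42')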
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/mplgraphicsview.py | python | Qt4MplCanvas.add_1d_plot_right | (self, x, y, color=None, label="", x_label=None, ylabel=None, marker=None, linestyle=None,
linewidth=1) | return line_key | Add a line (1-d plot) at right axis | Add a line (1-d plot) at right axis | [
"Add",
"a",
"line",
"(",
"1",
"-",
"d",
"plot",
")",
"at",
"right",
"axis"
] | def add_1d_plot_right(self, x, y, color=None, label="", x_label=None, ylabel=None, marker=None, linestyle=None,
linewidth=1):
""" Add a line (1-d plot) at right axis
"""
if self.axes2 is None:
self.axes2 = self.axes.twinx()
# Hold previous data
self.axes2.hold(True)
# Default
if color is None:
color = (0, 1, 0, 1)
if marker is None:
marker = 'o'
if linestyle is None:
linestyle = '-'
# Special default
if len(label) == 0:
label = 'right'
color = 'red'
# color must be RGBA (4-tuple)
r = self.axes2.plot(x, y, color=color, marker=marker, linestyle=linestyle,
label=label, linewidth=linewidth)
# return: list of matplotlib.lines.Line2D object
self.axes2.set_aspect('auto')
# set x-axis and y-axis label
if x_label is not None:
self.axes2.set_xlabel(x_label, fontsize=20)
if ylabel is not None:
self.axes2.set_ylabel(ylabel, fontsize=20)
# set/update legend
self._setup_legend()
# Register
line_key = -1
if len(r) == 1:
line_key = self._lineIndex
self._lineDict[line_key] = r[0]
self._lineIndex += 1
else:
print("Impoooooooooooooooosible!")
# Flush/commit
self.draw()
return line_key | [
"def",
"add_1d_plot_right",
"(",
"self",
",",
"x",
",",
"y",
",",
"color",
"=",
"None",
",",
"label",
"=",
"\"\"",
",",
"x_label",
"=",
"None",
",",
"ylabel",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"linestyle",
"=",
"None",
",",
"linewidth",
"=",
"1",
")",
":",
"if",
"self",
".",
"axes2",
"is",
"None",
":",
"self",
".",
"axes2",
"=",
"self",
".",
"axes",
".",
"twinx",
"(",
")",
"# Hold previous data",
"self",
".",
"axes2",
".",
"hold",
"(",
"True",
")",
"# Default",
"if",
"color",
"is",
"None",
":",
"color",
"=",
"(",
"0",
",",
"1",
",",
"0",
",",
"1",
")",
"if",
"marker",
"is",
"None",
":",
"marker",
"=",
"'o'",
"if",
"linestyle",
"is",
"None",
":",
"linestyle",
"=",
"'-'",
"# Special default",
"if",
"len",
"(",
"label",
")",
"==",
"0",
":",
"label",
"=",
"'right'",
"color",
"=",
"'red'",
"# color must be RGBA (4-tuple)",
"r",
"=",
"self",
".",
"axes2",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"color",
",",
"marker",
"=",
"marker",
",",
"linestyle",
"=",
"linestyle",
",",
"label",
"=",
"label",
",",
"linewidth",
"=",
"linewidth",
")",
"# return: list of matplotlib.lines.Line2D object",
"self",
".",
"axes2",
".",
"set_aspect",
"(",
"'auto'",
")",
"# set x-axis and y-axis label",
"if",
"x_label",
"is",
"not",
"None",
":",
"self",
".",
"axes2",
".",
"set_xlabel",
"(",
"x_label",
",",
"fontsize",
"=",
"20",
")",
"if",
"ylabel",
"is",
"not",
"None",
":",
"self",
".",
"axes2",
".",
"set_ylabel",
"(",
"ylabel",
",",
"fontsize",
"=",
"20",
")",
"# set/update legend",
"self",
".",
"_setup_legend",
"(",
")",
"# Register",
"line_key",
"=",
"-",
"1",
"if",
"len",
"(",
"r",
")",
"==",
"1",
":",
"line_key",
"=",
"self",
".",
"_lineIndex",
"self",
".",
"_lineDict",
"[",
"line_key",
"]",
"=",
"r",
"[",
"0",
"]",
"self",
".",
"_lineIndex",
"+=",
"1",
"else",
":",
"print",
"(",
"\"Impoooooooooooooooosible!\"",
")",
"# Flush/commit",
"self",
".",
"draw",
"(",
")",
"return",
"line_key"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/mplgraphicsview.py#L1194-L1245 |
|
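Stripped of the Qt canvas wrapper and line registry, the core of the method is matplotlib's twinx secondary axis (the hold(True) call above ties the original to matplotlib releases before 2.0, where Axes.hold still existed):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], color='blue', label='left scale')
ax2 = ax.twinx()                        # shares x, independent y on the right
ax2.plot([0, 1, 2], [100, 50, 25], color='red', label='right scale')
ax2.set_ylabel('right-hand axis', fontsize=20)
fig.legend()
plt.show()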
panda3d/panda3d | 833ad89ebad58395d0af0b7ec08538e5e4308265 | direct/src/showbase/ShowBase.py | python | ShowBase.textureOff | (self) | Disables texturing on the entire 3D scene graph. | Disables texturing on the entire 3D scene graph. | [
"Disables",
"texturing",
"on",
"the",
"entire",
"3D",
"scene",
"graph",
"."
] | def textureOff(self):
"""
Disables texturing on the entire 3D scene graph.
"""
self.render.setTextureOff(100)
self.textureEnabled = 0 | [
"def",
"textureOff",
"(",
"self",
")",
":",
"self",
".",
"render",
".",
"setTextureOff",
"(",
"100",
")",
"self",
".",
"textureEnabled",
"=",
"0"
] | https://github.com/panda3d/panda3d/blob/833ad89ebad58395d0af0b7ec08538e5e4308265/direct/src/showbase/ShowBase.py#L2347-L2352 |
||
ricardoquesada/Spidermonkey | 4a75ea2543408bd1b2c515aa95901523eeef7858 | dom/bindings/parser/WebIDL.py | python | Parser.p_AbsoluteScopedName | (self, p) | AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts | AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts | [
"AbsoluteScopedName",
":",
"SCOPE",
"IDENTIFIER",
"ScopedNameParts"
] | def p_AbsoluteScopedName(self, p):
"""
AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
"""
assert False
pass | [
"def",
"p_AbsoluteScopedName",
"(",
"self",
",",
"p",
")",
":",
"assert",
"False",
"pass"
] | https://github.com/ricardoquesada/Spidermonkey/blob/4a75ea2543408bd1b2c515aa95901523eeef7858/dom/bindings/parser/WebIDL.py#L5483-L5488 |
||
Xilinx/Vitis-AI | fc74d404563d9951b57245443c73bef389f3657f | tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/bigtable/python/ops/bigtable_api.py | python | BigtableTable.write | (self, dataset, column_families, columns, timestamp=None) | return gen_bigtable_ops.dataset_to_bigtable(
self._resource,
dataset._variant_tensor, # pylint: disable=protected-access
column_families,
columns,
timestamp) | Writes a dataset to the table.
Args:
dataset: A `tf.data.Dataset` to be written to this table. It must produce
a list of number-of-columns+1 elements, all of which must be strings.
The first value will be used as the row key, and subsequent values will
be used as cell values for the corresponding columns from the
corresponding column_families and columns entries.
column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
timestamp: (Optional.) An int64 timestamp to write all the values at.
Leave as None to use server-provided timestamps.
Returns:
A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
number of columns and column_families does not match the output of
`dataset`. | Writes a dataset to the table. | [
"Writes",
"a",
"dataset",
"to",
"the",
"table",
"."
] | def write(self, dataset, column_families, columns, timestamp=None):
"""Writes a dataset to the table.
Args:
dataset: A `tf.data.Dataset` to be written to this table. It must produce
a list of number-of-columns+1 elements, all of which must be strings.
The first value will be used as the row key, and subsequent values will
be used as cell values for the corresponding columns from the
corresponding column_families and columns entries.
column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
timestamp: (Optional.) An int64 timestamp to write all the values at.
Leave as None to use server-provided timestamps.
Returns:
A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
number of columns and column_families does not match the output of
`dataset`.
"""
if timestamp is None:
timestamp = -1 # Bigtable server provided timestamp.
for tensor_type in nest.flatten(
dataset_ops.get_legacy_output_types(dataset)):
if tensor_type != dtypes.string:
raise ValueError("Not all elements of the dataset were `tf.string`")
for shape in nest.flatten(dataset_ops.get_legacy_output_shapes(dataset)):
if not shape.is_compatible_with(tensor_shape.TensorShape([])):
raise ValueError("Not all elements of the dataset were scalars")
if len(column_families) != len(columns):
raise ValueError("len(column_families) != len(columns)")
if len(nest.flatten(
dataset_ops.get_legacy_output_types(dataset))) != len(columns) + 1:
raise ValueError("A column name must be specified for every component of "
"the dataset elements. (e.g.: len(columns) != "
"len(dataset.output_types))")
return gen_bigtable_ops.dataset_to_bigtable(
self._resource,
dataset._variant_tensor, # pylint: disable=protected-access
column_families,
columns,
timestamp) | [
"def",
"write",
"(",
"self",
",",
"dataset",
",",
"column_families",
",",
"columns",
",",
"timestamp",
"=",
"None",
")",
":",
"if",
"timestamp",
"is",
"None",
":",
"timestamp",
"=",
"-",
"1",
"# Bigtable server provided timestamp.",
"for",
"tensor_type",
"in",
"nest",
".",
"flatten",
"(",
"dataset_ops",
".",
"get_legacy_output_types",
"(",
"dataset",
")",
")",
":",
"if",
"tensor_type",
"!=",
"dtypes",
".",
"string",
":",
"raise",
"ValueError",
"(",
"\"Not all elements of the dataset were `tf.string`\"",
")",
"for",
"shape",
"in",
"nest",
".",
"flatten",
"(",
"dataset_ops",
".",
"get_legacy_output_shapes",
"(",
"dataset",
")",
")",
":",
"if",
"not",
"shape",
".",
"is_compatible_with",
"(",
"tensor_shape",
".",
"TensorShape",
"(",
"[",
"]",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Not all elements of the dataset were scalars\"",
")",
"if",
"len",
"(",
"column_families",
")",
"!=",
"len",
"(",
"columns",
")",
":",
"raise",
"ValueError",
"(",
"\"len(column_families) != len(columns)\"",
")",
"if",
"len",
"(",
"nest",
".",
"flatten",
"(",
"dataset_ops",
".",
"get_legacy_output_types",
"(",
"dataset",
")",
")",
")",
"!=",
"len",
"(",
"columns",
")",
"+",
"1",
":",
"raise",
"ValueError",
"(",
"\"A column name must be specified for every component of \"",
"\"the dataset elements. (e.g.: len(columns) != \"",
"\"len(dataset.output_types))\"",
")",
"return",
"gen_bigtable_ops",
".",
"dataset_to_bigtable",
"(",
"self",
".",
"_resource",
",",
"dataset",
".",
"_variant_tensor",
",",
"# pylint: disable=protected-access",
"column_families",
",",
"columns",
",",
"timestamp",
")"
] | https://github.com/Xilinx/Vitis-AI/blob/fc74d404563d9951b57245443c73bef389f3657f/tools/Vitis-AI-Quantizer/vai_q_tensorflow1.x/tensorflow/contrib/bigtable/python/ops/bigtable_api.py#L448-L493 |
|
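A sketch of a dataset shaped for the contract above: each element is number-of-columns+1 scalar strings, the first being the row key. The table handle, family, and column names are invented; the API is the TF 1.x contrib module documented here:

import tensorflow as tf   # 1.x-era API, matching the contrib module above

dataset = tf.data.Dataset.from_tensor_slices((
    ['row-0', 'row-1'],    # row keys
    ['alice', 'bob'],      # values for family 'cf1', column 'name'
    ['10', '20'],          # values for family 'cf1', column 'score'
))
# With `table` obtained from the contrib Bigtable client, the write op would be
# built and then run inside a session:
#   write_op = table.write(dataset, column_families=['cf1', 'cf1'],
#                          columns=['name', 'score'])
#   sess.run(write_op)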
tensorflow/tensorflow | 419e3a6b650ea4bd1b0cba23c4348f8a69f3272e | tensorflow/compiler/xla/python_api/xla_literal.py | python | ConvertLiteralToNumpyArray | (literal) | | Converts an XLA literal to a Numpy array. | Converts an XLA literal to a Numpy array. | [
"Converts",
"a",
"XLA",
"literal",
"to",
"a",
"Numpy",
"array",
"."
] | def ConvertLiteralToNumpyArray(literal):
"""Converts a XLA literal to a Numpy array."""
element_type = literal.shape.element_type
if element_type == xla_data_pb2.TUPLE:
return tuple(
ConvertLiteralToNumpyArray(subliteral)
for subliteral in literal.tuple_literals)
type_record = types.MAP_XLA_TYPE_TO_RECORD[element_type]
if not literal.shape.dimensions:
return _np.array(
getattr(literal, type_record.literal_field_name)[0],
type_record.numpy_dtype)
else:
# Infer the proper Numpy order from the LiteralProto's layout. The repeated
# field representing the array's content in the Literal is linearized.
# Reading is done in two steps:
#
# 1. Read the array as 1D from the LiteralProto repeated field.
# 2. Reshape the array to its proper shape, using the right order depending
# on the LiteralProto's layout.
layout_order = literal.shape.layout.minor_to_major
numpy_shape = tuple(literal.shape.dimensions)
if layout_order == list(range(len(literal.shape.dimensions))):
numpy_reshaper = lambda arr: arr.reshape(numpy_shape, order='F')
elif layout_order == list(range(len(literal.shape.dimensions) - 1, -1, -1)):
numpy_reshaper = lambda arr: arr.reshape(numpy_shape, order='C')
else:
raise NotImplementedError('Unsupported layout: {0}'.format(layout_order))
ndarray = _np.array(
getattr(literal, type_record.literal_field_name),
copy=False,
dtype=type_record.numpy_dtype)
return numpy_reshaper(ndarray) | [
"def",
"ConvertLiteralToNumpyArray",
"(",
"literal",
")",
":",
"element_type",
"=",
"literal",
".",
"shape",
".",
"element_type",
"if",
"element_type",
"==",
"xla_data_pb2",
".",
"TUPLE",
":",
"return",
"tuple",
"(",
"ConvertLiteralToNumpyArray",
"(",
"subliteral",
")",
"for",
"subliteral",
"in",
"literal",
".",
"tuple_literals",
")",
"type_record",
"=",
"types",
".",
"MAP_XLA_TYPE_TO_RECORD",
"[",
"element_type",
"]",
"if",
"not",
"literal",
".",
"shape",
".",
"dimensions",
":",
"return",
"_np",
".",
"array",
"(",
"getattr",
"(",
"literal",
",",
"type_record",
".",
"literal_field_name",
")",
"[",
"0",
"]",
",",
"type_record",
".",
"numpy_dtype",
")",
"else",
":",
"# Infer the proper Numpy order from the LiteralProto's layout. The repeated",
"# field representing the array's content in the Literal is linearized.",
"# Reading is done in two steps:",
"#",
"# 1. Read the array as 1D from the LiteralProto repeated field.",
"# 2. Reshape the array to its proper shape, using the right order depending",
"# on the LiteralProto's layout.",
"layout_order",
"=",
"literal",
".",
"shape",
".",
"layout",
".",
"minor_to_major",
"numpy_shape",
"=",
"tuple",
"(",
"literal",
".",
"shape",
".",
"dimensions",
")",
"if",
"layout_order",
"==",
"list",
"(",
"range",
"(",
"len",
"(",
"literal",
".",
"shape",
".",
"dimensions",
")",
")",
")",
":",
"numpy_reshaper",
"=",
"lambda",
"arr",
":",
"arr",
".",
"reshape",
"(",
"numpy_shape",
",",
"order",
"=",
"'F'",
")",
"elif",
"layout_order",
"==",
"list",
"(",
"range",
"(",
"len",
"(",
"literal",
".",
"shape",
".",
"dimensions",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
")",
":",
"numpy_reshaper",
"=",
"lambda",
"arr",
":",
"arr",
".",
"reshape",
"(",
"numpy_shape",
",",
"order",
"=",
"'C'",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"'Unsupported layout: {0}'",
".",
"format",
"(",
"layout_order",
")",
")",
"ndarray",
"=",
"_np",
".",
"array",
"(",
"getattr",
"(",
"literal",
",",
"type_record",
".",
"literal_field_name",
")",
",",
"copy",
"=",
"False",
",",
"dtype",
"=",
"type_record",
".",
"numpy_dtype",
")",
"return",
"numpy_reshaper",
"(",
"ndarray",
")"
] | https://github.com/tensorflow/tensorflow/blob/419e3a6b650ea4bd1b0cba23c4348f8a69f3272e/tensorflow/compiler/xla/python_api/xla_literal.py#L24-L57 |
||
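The layout branch above maps a minor-to-major order of [0, 1, ...] to Fortran order and the reversed order to C order; the difference shown directly with numpy:

import numpy as np

flat = np.arange(1, 7)                      # linearized literal contents
print(flat.reshape((2, 3), order='C'))      # [[1 2 3] [4 5 6]]   layout [1, 0]
print(flat.reshape((2, 3), order='F'))      # [[1 3 5] [2 4 6]]   layout [0, 1]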
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | canonicPath | (path) | return ret | Constructs a canonic path from the specified path. | Constructs a canonic path from the specified path. | [
"Constructs",
"a",
"canonic",
"path",
"from",
"the",
"specified",
"path",
"."
] | def canonicPath(path):
"""Constructs a canonic path from the specified path. """
ret = libxml2mod.xmlCanonicPath(path)
return ret | [
"def",
"canonicPath",
"(",
"path",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlCanonicPath",
"(",
"path",
")",
"return",
"ret"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L1792-L1795 |
|
Evolving-AI-Lab/fooling | 66f097dd6bd2eb6794ade3e187a7adfdf1887688 | caffe/scripts/cpp_lint.py | python | CheckForBadCharacters | (filename, lines, error) | Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found. | Logs an error for each line containing bad characters. | [
"Logs",
"an",
"error",
"for",
"each",
"line",
"containing",
"bad",
"characters",
"."
] | def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') | [
"def",
"CheckForBadCharacters",
"(",
"filename",
",",
"lines",
",",
"error",
")",
":",
"for",
"linenum",
",",
"line",
"in",
"enumerate",
"(",
"lines",
")",
":",
"if",
"u'\\ufffd'",
"in",
"line",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/utf8'",
",",
"5",
",",
"'Line contains invalid UTF-8 (or Unicode replacement character).'",
")",
"if",
"'\\0'",
"in",
"line",
":",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/nul'",
",",
"5",
",",
"'Line contains NUL byte.'",
")"
] | https://github.com/Evolving-AI-Lab/fooling/blob/66f097dd6bd2eb6794ade3e187a7adfdf1887688/caffe/scripts/cpp_lint.py#L1483-L1505 |
||
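Driving the check with an in-memory collector; the reported line numbers are 0-based because the function enumerates from zero:

found = []

def collect(filename, linenum, category, confidence, message):
    found.append((linenum, category))

CheckForBadCharacters('demo.cc',
                      ['ok line', 'bad \ufffd line', 'nul \0 here'],
                      collect)
print(found)   # [(1, 'readability/utf8'), (2, 'readability/nul')]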
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/arrays/datetimelike.py | python | DatetimeLikeArrayMixin.mean | (self, skipna=True) | return self._box_func(result) | Return the mean value of the Array.
.. versionadded:: 0.25.0
Parameters
----------
skipna : bool, default True
Whether to ignore any NaT elements.
Returns
-------
scalar
Timestamp or Timedelta.
See Also
--------
numpy.ndarray.mean : Returns the average of array elements along a given axis.
Series.mean : Return the mean value in a Series.
Notes
-----
mean is only defined for Datetime and Timedelta dtypes, not for Period. | Return the mean value of the Array. | [
"Return",
"the",
"mean",
"value",
"of",
"the",
"Array",
"."
] | def mean(self, skipna=True):
"""
Return the mean value of the Array.
.. versionadded:: 0.25.0
Parameters
----------
skipna : bool, default True
Whether to ignore any NaT elements.
Returns
-------
scalar
Timestamp or Timedelta.
See Also
--------
numpy.ndarray.mean : Returns the average of array elements along a given axis.
Series.mean : Return the mean value in a Series.
Notes
-----
mean is only defined for Datetime and Timedelta dtypes, not for Period.
"""
if is_period_dtype(self):
# See discussion in GH#24757
raise TypeError(
f"mean is not implemented for {type(self).__name__} since the "
"meaning is ambiguous. An alternative is "
"obj.to_timestamp(how='start').mean()"
)
mask = self.isna()
if skipna:
values = self[~mask]
elif mask.any():
return NaT
else:
values = self
if not len(values):
# short-circuit for empty max / min
return NaT
result = nanops.nanmean(values.view("i8"), skipna=skipna)
# Don't have to worry about NA `result`, since no NA went in.
return self._box_func(result) | [
"def",
"mean",
"(",
"self",
",",
"skipna",
"=",
"True",
")",
":",
"if",
"is_period_dtype",
"(",
"self",
")",
":",
"# See discussion in GH#24757",
"raise",
"TypeError",
"(",
"f\"mean is not implemented for {type(self).__name__} since the \"",
"\"meaning is ambiguous. An alternative is \"",
"\"obj.to_timestamp(how='start').mean()\"",
")",
"mask",
"=",
"self",
".",
"isna",
"(",
")",
"if",
"skipna",
":",
"values",
"=",
"self",
"[",
"~",
"mask",
"]",
"elif",
"mask",
".",
"any",
"(",
")",
":",
"return",
"NaT",
"else",
":",
"values",
"=",
"self",
"if",
"not",
"len",
"(",
"values",
")",
":",
"# short-circuit for empty max / min",
"return",
"NaT",
"result",
"=",
"nanops",
".",
"nanmean",
"(",
"values",
".",
"view",
"(",
"\"i8\"",
")",
",",
"skipna",
"=",
"skipna",
")",
"# Don't have to worry about NA `result`, since no NA went in.",
"return",
"self",
".",
"_box_func",
"(",
"result",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pandas/core/arrays/datetimelike.py#L1540-L1587 |
|
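The mean body above reduces datetimes to their int64 representation before averaging; the same idea can be reproduced with plain NumPy. A sketch of the NaT-skipping path (the algorithm the record shows, not pandas' actual implementation):

```python
import numpy as np

def datetime_mean(values):
    """Mean of a datetime64 array, skipping NaT, mirroring the record above."""
    values = np.asarray(values, dtype="datetime64[ns]")
    mask = np.isnat(values)
    kept = values[~mask]
    if kept.size == 0:
        return np.datetime64("NaT")
    # View as int64 nanoseconds, average, then box back into datetime64.
    return np.datetime64(int(kept.view("i8").mean()), "ns")

print(datetime_mean(["2020-01-01", "2020-01-03", "NaT"]))  # 2020-01-02
```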
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py | python | PostTrainingQuantization._set_activation_persistable | (self) | | | Set activation variables to be persistable, so we can obtain
the tensor data in sample_data | Set activation variables to be persistable, so we can obtain
the tensor data in sample_data | [
"Set",
"activation",
"variables",
"to",
"be",
"persistable",
"so",
"can",
"obtain",
"the",
"tensor",
"data",
"in",
"sample_data"
] | def _set_activation_persistable(self):
'''
Set activation variables to be persistable, so we can obtain
the tensor data in sample_data
'''
for var in self._program.list_vars():
if var.name in self._quantized_act_var_name:
var.persistable = True | [
"def",
"_set_activation_persistable",
"(",
"self",
")",
":",
"for",
"var",
"in",
"self",
".",
"_program",
".",
"list_vars",
"(",
")",
":",
"if",
"var",
".",
"name",
"in",
"self",
".",
"_quantized_act_var_name",
":",
"var",
".",
"persistable",
"=",
"True"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py#L553-L560 |
||
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/python/ipython/py3/IPython/core/history.py | python | HistorySavingThread.stop | (self) | This can be called from the main thread to safely stop this thread.
Note that it does not attempt to write out remaining history before
exiting. That should be done by calling the HistoryManager's
end_session method. | This can be called from the main thread to safely stop this thread. | [
"This",
"can",
"be",
"called",
"from",
"the",
"main",
"thread",
"to",
"safely",
"stop",
"this",
"thread",
"."
] | def stop(self):
"""This can be called from the main thread to safely stop this thread.
Note that it does not attempt to write out remaining history before
exiting. That should be done by calling the HistoryManager's
end_session method."""
self.stop_now = True
self.history_manager.save_flag.set()
self.join() | [
"def",
"stop",
"(",
"self",
")",
":",
"self",
".",
"stop_now",
"=",
"True",
"self",
".",
"history_manager",
".",
"save_flag",
".",
"set",
"(",
")",
"self",
".",
"join",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/python/ipython/py3/IPython/core/history.py#L839-L847 |
||
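The stop method above relies on a flag plus an event to wake the sleeping worker before joining. A self-contained sketch of that shutdown pattern (generic names, not IPython's actual classes):

```python
import threading

class Worker(threading.Thread):
    """Waits on an event; a flag set before the event lets stop() unblock it."""
    def __init__(self):
        super().__init__()
        self.stop_now = False
        self.save_flag = threading.Event()

    def run(self):
        while True:
            self.save_flag.wait()      # sleep until woken
            self.save_flag.clear()
            if self.stop_now:
                return                 # exit without doing further work
            # ... do one unit of work here ...

    def stop(self):
        # Same shape as the record above: set the flag, wake the thread, join.
        self.stop_now = True
        self.save_flag.set()
        self.join()

w = Worker()
w.start()
w.stop()
```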
natanielruiz/android-yolo | 1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f | jni-build/jni/include/tensorflow/contrib/lookup/lookup_ops.py | python | MutableHashTable.size | (self, name=None) | Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table. | Compute the number of elements in this table. | [
"Compute",
"the",
"number",
"of",
"elements",
"in",
"this",
"table",
"."
] | def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.op_scope([self._table_ref], name, "%s_Size" % self._name) as name:
# pylint: disable=protected-access
return gen_data_flow_ops._lookup_table_size(self._table_ref, name=name) | [
"def",
"size",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"op_scope",
"(",
"[",
"self",
".",
"_table_ref",
"]",
",",
"name",
",",
"\"%s_Size\"",
"%",
"self",
".",
"_name",
")",
"as",
"name",
":",
"# pylint: disable=protected-access",
"return",
"gen_data_flow_ops",
".",
"_lookup_table_size",
"(",
"self",
".",
"_table_ref",
",",
"name",
"=",
"name",
")"
] | https://github.com/natanielruiz/android-yolo/blob/1ebb54f96a67a20ff83ddfc823ed83a13dc3a47f/jni-build/jni/include/tensorflow/contrib/lookup/lookup_ops.py#L754-L765 |
||
msftguy/ssh-rd | a5f3a79daeac5844edebf01916c9613563f1c390 | _3rd/boost_1_48_0/tools/build/v2/build/virtual_target.py | python | Action.actualize_source_type | (self, sources, prop_set) | return result | Helper for 'actualize_sources'.
For each passed source, actualizes it with the appropriate scanner.
Returns the actualized virtual targets. | Helper for 'actualize_sources'.
For each passed source, actualizes it with the appropriate scanner.
Returns the actualized virtual targets. | [
"Helper",
"for",
"actualize_sources",
".",
"For",
"each",
"passed",
"source",
"actualizes",
"it",
"with",
"the",
"appropriate",
"scanner",
".",
"Returns",
"the",
"actualized",
"virtual",
"targets",
"."
] | def actualize_source_type (self, sources, prop_set):
""" Helper for 'actualize_sources'.
For each passed source, actualizes it with the appropriate scanner.
Returns the actualized virtual targets.
"""
result = []
for i in sources:
scanner = None
# FIXME: what's this?
# if isinstance (i, str):
# i = self.manager_.get_object (i)
if i.type ():
scanner = b2.build.type.get_scanner (i.type (), prop_set)
r = i.actualize (scanner)
result.append (r)
return result | [
"def",
"actualize_source_type",
"(",
"self",
",",
"sources",
",",
"prop_set",
")",
":",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"sources",
":",
"scanner",
"=",
"None",
"# FIXME: what's this?",
"# if isinstance (i, str):",
"# i = self.manager_.get_object (i)",
"if",
"i",
".",
"type",
"(",
")",
":",
"scanner",
"=",
"b2",
".",
"build",
".",
"type",
".",
"get_scanner",
"(",
"i",
".",
"type",
"(",
")",
",",
"prop_set",
")",
"r",
"=",
"i",
".",
"actualize",
"(",
"scanner",
")",
"result",
".",
"append",
"(",
"r",
")",
"return",
"result"
] | https://github.com/msftguy/ssh-rd/blob/a5f3a79daeac5844edebf01916c9613563f1c390/_3rd/boost_1_48_0/tools/build/v2/build/virtual_target.py#L835-L854 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | wx/lib/agw/shortcuteditor.py | python | ShortcutEditor.PreShow | (self) | Does some more common initialization before showing :class:`ShortcutEditor`. | Does some more common initialization before showing :class:`ShortcutEditor`. | [
"Does",
"some",
"more",
"common",
"initialization",
"before",
"showing",
":",
"class",
":",
"ShortcutEditor",
"."
] | def PreShow(self):
""" Does some more common initialization before showing :class:`ShortcutEditor`. """
self.listShortcut.MakeImageList()
self.listShortcut.RecreateTree()
self.SetColumnWidths() | [
"def",
"PreShow",
"(",
"self",
")",
":",
"self",
".",
"listShortcut",
".",
"MakeImageList",
"(",
")",
"self",
".",
"listShortcut",
".",
"RecreateTree",
"(",
")",
"self",
".",
"SetColumnWidths",
"(",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/wx/lib/agw/shortcuteditor.py#L2578-L2584 |
||
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py | python | xmlTextReader.RelaxNGSetSchema | (self, schema) | return ret | Use RelaxNG to validate the document as it is processed.
Activation is only possible before the first Read(). if
@schema is None, then RelaxNG validation is deactivated. @
The @schema should not be freed until the reader is
deallocated or its use has been deactivated. | Use RelaxNG to validate the document as it is processed.
Activation is only possible before the first Read(). if | [
"Use",
"RelaxNG",
"to",
"validate",
"the",
"document",
"as",
"it",
"is",
"processed",
".",
"Activation",
"is",
"only",
"possible",
"before",
"the",
"first",
"Read",
"()",
".",
"if"
] | def RelaxNGSetSchema(self, schema):
"""Use RelaxNG to validate the document as it is processed.
Activation is only possible before the first Read(). if
@schema is None, then RelaxNG validation is deactivated. @
The @schema should not be freed until the reader is
deallocated or its use has been deactivated. """
if schema is None: schema__o = None
else: schema__o = schema._o
ret = libxml2mod.xmlTextReaderRelaxNGSetSchema(self._o, schema__o)
return ret | [
"def",
"RelaxNGSetSchema",
"(",
"self",
",",
"schema",
")",
":",
"if",
"schema",
"is",
"None",
":",
"schema__o",
"=",
"None",
"else",
":",
"schema__o",
"=",
"schema",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlTextReaderRelaxNGSetSchema",
"(",
"self",
".",
"_o",
",",
"schema__o",
")",
"return",
"ret"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/libs/libxml2-2.9.4/python/libxml2.py#L6860-L6869 |
|
adobe/chromium | cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7 | third_party/closure_linter/closure_linter/statetracker.py | python | StateTracker.InObjectLiteralDescendant | (self) | return self.OBJECT_LITERAL in self._block_types | Returns true if the current token has an object literal ancestor.
Returns:
True if the current token has an object literal ancestor. | Returns true if the current token has an object literal ancestor. | [
"Returns",
"true",
"if",
"the",
"current",
"token",
"has",
"an",
"object",
"literal",
"ancestor",
"."
] | def InObjectLiteralDescendant(self):
"""Returns true if the current token has an object literal ancestor.
Returns:
True if the current token has an object literal ancestor.
"""
return self.OBJECT_LITERAL in self._block_types | [
"def",
"InObjectLiteralDescendant",
"(",
"self",
")",
":",
"return",
"self",
".",
"OBJECT_LITERAL",
"in",
"self",
".",
"_block_types"
] | https://github.com/adobe/chromium/blob/cfe5bf0b51b1f6b9fe239c2a3c2f2364da9967d7/third_party/closure_linter/closure_linter/statetracker.py#L685-L691 |
|
nnrg/opennero | 43e12a1bcba6e228639db3886fec1dc47ddc24cb | mods/NERO/environment.py | python | NeroEnvironment.get_agent_info | (self, agent) | return OpenNero.AgentInitInfo(sbound, abound, rbound) | return a blueprint for a new agent | return a blueprint for a new agent | [
"return",
"a",
"blueprint",
"for",
"a",
"new",
"agent"
] | def get_agent_info(self, agent):
"""
return a blueprint for a new agent
"""
for a in constants.WALL_RAY_SENSORS:
agent.add_sensor(OpenNero.RaySensor(
math.cos(math.radians(a)), math.sin(math.radians(a)), 0,
constants.WALL_SENSOR_RADIUS,
constants.OBJECT_TYPE_OBSTACLE,
False))
for a0, a1 in constants.FLAG_RADAR_SENSORS:
agent.add_sensor(OpenNero.RadarSensor(
a0, a1, -90, 90, constants.MAX_VISION_RADIUS,
constants.OBJECT_TYPE_FLAG,
False))
sense = constants.OBJECT_TYPE_TEAM_0
if agent.team_type == sense:
sense = constants.OBJECT_TYPE_TEAM_1
for a0, a1 in constants.ENEMY_RADAR_SENSORS:
agent.add_sensor(OpenNero.RadarSensor(
a0, a1, -90, 90, constants.MAX_VISION_RADIUS,
sense,
False))
for a in constants.TARGETING_SENSORS:
agent.add_sensor(OpenNero.RaySensor(
math.cos(math.radians(a)), math.sin(math.radians(a)), 0,
constants.TARGET_SENSOR_RADIUS,
sense,
False))
abound = OpenNero.FeatureVectorInfo() # actions
sbound = OpenNero.FeatureVectorInfo() # sensors
# actions
abound.add_continuous(-1, 1) # forward/backward speed
abound.add_continuous(-constants.MAX_TURNING_RATE, constants.MAX_TURNING_RATE) # left/right turn (in radians)
abound.add_continuous(0, 1) # fire
abound.add_continuous(0, 1) # omit friend sensors
# sensor dimensions
for a in range(constants.N_SENSORS):
sbound.add_continuous(0, 1)
rbound = OpenNero.FeatureVectorInfo()
rbound.add_continuous(0, 1)
return OpenNero.AgentInitInfo(sbound, abound, rbound) | [
"def",
"get_agent_info",
"(",
"self",
",",
"agent",
")",
":",
"for",
"a",
"in",
"constants",
".",
"WALL_RAY_SENSORS",
":",
"agent",
".",
"add_sensor",
"(",
"OpenNero",
".",
"RaySensor",
"(",
"math",
".",
"cos",
"(",
"math",
".",
"radians",
"(",
"a",
")",
")",
",",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"a",
")",
")",
",",
"0",
",",
"constants",
".",
"WALL_SENSOR_RADIUS",
",",
"constants",
".",
"OBJECT_TYPE_OBSTACLE",
",",
"False",
")",
")",
"for",
"a0",
",",
"a1",
"in",
"constants",
".",
"FLAG_RADAR_SENSORS",
":",
"agent",
".",
"add_sensor",
"(",
"OpenNero",
".",
"RadarSensor",
"(",
"a0",
",",
"a1",
",",
"-",
"90",
",",
"90",
",",
"constants",
".",
"MAX_VISION_RADIUS",
",",
"constants",
".",
"OBJECT_TYPE_FLAG",
",",
"False",
")",
")",
"sense",
"=",
"constants",
".",
"OBJECT_TYPE_TEAM_0",
"if",
"agent",
".",
"team_type",
"==",
"sense",
":",
"sense",
"=",
"constants",
".",
"OBJECT_TYPE_TEAM_1",
"for",
"a0",
",",
"a1",
"in",
"constants",
".",
"ENEMY_RADAR_SENSORS",
":",
"agent",
".",
"add_sensor",
"(",
"OpenNero",
".",
"RadarSensor",
"(",
"a0",
",",
"a1",
",",
"-",
"90",
",",
"90",
",",
"constants",
".",
"MAX_VISION_RADIUS",
",",
"sense",
",",
"False",
")",
")",
"for",
"a",
"in",
"constants",
".",
"TARGETING_SENSORS",
":",
"agent",
".",
"add_sensor",
"(",
"OpenNero",
".",
"RaySensor",
"(",
"math",
".",
"cos",
"(",
"math",
".",
"radians",
"(",
"a",
")",
")",
",",
"math",
".",
"sin",
"(",
"math",
".",
"radians",
"(",
"a",
")",
")",
",",
"0",
",",
"constants",
".",
"TARGET_SENSOR_RADIUS",
",",
"sense",
",",
"False",
")",
")",
"abound",
"=",
"OpenNero",
".",
"FeatureVectorInfo",
"(",
")",
"# actions",
"sbound",
"=",
"OpenNero",
".",
"FeatureVectorInfo",
"(",
")",
"# sensors",
"# actions",
"abound",
".",
"add_continuous",
"(",
"-",
"1",
",",
"1",
")",
"# forward/backward speed",
"abound",
".",
"add_continuous",
"(",
"-",
"constants",
".",
"MAX_TURNING_RATE",
",",
"constants",
".",
"MAX_TURNING_RATE",
")",
"# left/right turn (in radians)",
"abound",
".",
"add_continuous",
"(",
"0",
",",
"1",
")",
"# fire ",
"abound",
".",
"add_continuous",
"(",
"0",
",",
"1",
")",
"# omit friend sensors ",
"# sensor dimensions",
"for",
"a",
"in",
"range",
"(",
"constants",
".",
"N_SENSORS",
")",
":",
"sbound",
".",
"add_continuous",
"(",
"0",
",",
"1",
")",
"rbound",
"=",
"OpenNero",
".",
"FeatureVectorInfo",
"(",
")",
"rbound",
".",
"add_continuous",
"(",
"0",
",",
"1",
")",
"return",
"OpenNero",
".",
"AgentInitInfo",
"(",
"sbound",
",",
"abound",
",",
"rbound",
")"
] | https://github.com/nnrg/opennero/blob/43e12a1bcba6e228639db3886fec1dc47ddc24cb/mods/NERO/environment.py#L275-L320 |
|
ablab/quast | 5f6709528129a6ad266a6b24ef3f40b88f0fe04b | quast_libs/busco/pipebricks/PipeLogger.py | python | PipeLogger.warning | (self, msg, *args, **kwargs) | This function overrides the _logger class warning
:param msg: the message to log
:type msg: str | This function overrides the _logger class warning
:param msg: the message to log
:type msg: str | [
"This",
"function",
"overrides",
"the",
"_logger",
"class",
"warning",
":",
"param",
"msg",
":",
"the",
"message",
"to",
"log",
":",
"type",
"msg",
":",
"str"
] | def warning(self, msg, *args, **kwargs):
"""
This function overrides the _logger class warning
:param msg: the message to log
:type msg: str
"""
PipeLogger._has_warning = True
super(PipeLogger, self).warning(msg, *args, **kwargs) | [
"def",
"warning",
"(",
"self",
",",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"PipeLogger",
".",
"_has_warning",
"=",
"True",
"super",
"(",
"PipeLogger",
",",
"self",
")",
".",
"warning",
"(",
"msg",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/ablab/quast/blob/5f6709528129a6ad266a6b24ef3f40b88f0fe04b/quast_libs/busco/pipebricks/PipeLogger.py#L111-L118 |
||
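The warning override above just records that a warning happened before delegating to the base class. The same trick works with the standard logging module; a minimal sketch (names are illustrative, not BUSCO's):

```python
import logging

class FlaggingLogger(logging.Logger):
    """Logger that remembers whether any warning was ever emitted."""
    has_warning = False

    def warning(self, msg, *args, **kwargs):
        FlaggingLogger.has_warning = True   # class-wide flag, as in the record
        super().warning(msg, *args, **kwargs)

logging.setLoggerClass(FlaggingLogger)
log = logging.getLogger("demo")
log.warning("disk almost full")
print(FlaggingLogger.has_warning)  # True
```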
microsoft/checkedc-clang | a173fefde5d7877b7750e7ce96dd08cf18baebf2 | mlir/utils/gdb-scripts/prettyprinters.py | python | StorageTypeMap._init_map | (self) | Lazy initialization of self.map. | Lazy initialization of self.map. | [
"Lazy",
"initialization",
"of",
"self",
".",
"map",
"."
] | def _init_map(self):
"""Lazy initialization of self.map."""
if self.map:
return
self.map = {}
for type_name in self.type_names:
concrete_type = gdb.lookup_type(type_name)
try:
storage = gdb.parse_and_eval(
"&'mlir::detail::TypeIDExported::get<%s>()::instance'" % type_name)
except gdb.error:
# Skip when TypeID instance cannot be found in current context.
continue
if concrete_type and storage:
self.map[int(storage)] = concrete_type | [
"def",
"_init_map",
"(",
"self",
")",
":",
"if",
"self",
".",
"map",
":",
"return",
"self",
".",
"map",
"=",
"{",
"}",
"for",
"type_name",
"in",
"self",
".",
"type_names",
":",
"concrete_type",
"=",
"gdb",
".",
"lookup_type",
"(",
"type_name",
")",
"try",
":",
"storage",
"=",
"gdb",
".",
"parse_and_eval",
"(",
"\"&'mlir::detail::TypeIDExported::get<%s>()::instance'\"",
"%",
"type_name",
")",
"except",
"gdb",
".",
"error",
":",
"# Skip when TypeID instance cannot be found in current context.",
"continue",
"if",
"concrete_type",
"and",
"storage",
":",
"self",
".",
"map",
"[",
"int",
"(",
"storage",
")",
"]",
"=",
"concrete_type"
] | https://github.com/microsoft/checkedc-clang/blob/a173fefde5d7877b7750e7ce96dd08cf18baebf2/mlir/utils/gdb-scripts/prettyprinters.py#L99-L113 |
||
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/llvmlite/ir/builder.py | python | IRBuilder.if_then | (self, pred, likely=None) | A context manager which sets up a conditional basic block based
on the given predicate (an i1 value). If the conditional block
is not explicitly terminated, a branch will be added to the next
block.
If *likely* is given, its boolean value indicates whether the
predicate is likely to be true or not, and metadata is issued
for LLVM's optimizers to account for that. | A context manager which sets up a conditional basic block based
on the given predicate (an i1 value). If the conditional block
is not explicitly terminated, a branch will be added to the next
block.
If *likely* is given, its boolean value indicates whether the
predicate is likely to be true or not, and metadata is issued
for LLVM's optimizers to account for that. | [
"A",
"context",
"manager",
"which",
"sets",
"up",
"a",
"conditional",
"basic",
"block",
"based",
"on",
"the",
"given",
"predicate",
"(",
"a",
"i1",
"value",
")",
".",
"If",
"the",
"conditional",
"block",
"is",
"not",
"explicitly",
"terminated",
"a",
"branch",
"will",
"be",
"added",
"to",
"the",
"next",
"block",
".",
"If",
"*",
"likely",
"*",
"is",
"given",
"its",
"boolean",
"value",
"indicates",
"whether",
"the",
"predicate",
"is",
"likely",
"to",
"be",
"true",
"or",
"not",
"and",
"metadata",
"is",
"issued",
"for",
"LLVM",
"s",
"optimizers",
"to",
"account",
"for",
"that",
"."
] | def if_then(self, pred, likely=None):
"""
A context manager which sets up a conditional basic block based
on the given predicate (an i1 value). If the conditional block
is not explicitly terminated, a branch will be added to the next
block.
If *likely* is given, its boolean value indicates whether the
predicate is likely to be true or not, and metadata is issued
for LLVM's optimizers to account for that.
"""
bb = self.basic_block
bbif = self.append_basic_block(name=_label_suffix(bb.name, '.if'))
bbend = self.append_basic_block(name=_label_suffix(bb.name, '.endif'))
br = self.cbranch(pred, bbif, bbend)
if likely is not None:
br.set_weights([99, 1] if likely else [1, 99])
with self._branch_helper(bbif, bbend):
yield bbend
self.position_at_end(bbend) | [
"def",
"if_then",
"(",
"self",
",",
"pred",
",",
"likely",
"=",
"None",
")",
":",
"bb",
"=",
"self",
".",
"basic_block",
"bbif",
"=",
"self",
".",
"append_basic_block",
"(",
"name",
"=",
"_label_suffix",
"(",
"bb",
".",
"name",
",",
"'.if'",
")",
")",
"bbend",
"=",
"self",
".",
"append_basic_block",
"(",
"name",
"=",
"_label_suffix",
"(",
"bb",
".",
"name",
",",
"'.endif'",
")",
")",
"br",
"=",
"self",
".",
"cbranch",
"(",
"pred",
",",
"bbif",
",",
"bbend",
")",
"if",
"likely",
"is",
"not",
"None",
":",
"br",
".",
"set_weights",
"(",
"[",
"99",
",",
"1",
"]",
"if",
"likely",
"else",
"[",
"1",
",",
"99",
"]",
")",
"with",
"self",
".",
"_branch_helper",
"(",
"bbif",
",",
"bbend",
")",
":",
"yield",
"bbend",
"self",
".",
"position_at_end",
"(",
"bbend",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/llvmlite/ir/builder.py#L269-L289 |
||
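A usage sketch for the if_then context manager above, built with llvmlite's public IR builder; the generated IR is the conditional skeleton the docstring describes:

```python
from llvmlite import ir

module = ir.Module(name="demo")
fnty = ir.FunctionType(ir.VoidType(), [ir.IntType(1)])
func = ir.Function(module, fnty, name="maybe_do")
builder = ir.IRBuilder(func.append_basic_block(name="entry"))

with builder.if_then(func.args[0], likely=True):
    pass  # instructions emitted here run only when the predicate is true

builder.ret_void()   # continues in the auto-created '.endif' block
print(module)
```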
ApolloAuto/apollo-platform | 86d9dc6743b496ead18d597748ebabd34a513289 | ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/polynomial/hermite_e.py | python | hermepow | (c, pow, maxpower=16) | Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.]) | Raise a Hermite series to a power. | [
"Raise",
"a",
"Hermite",
"series",
"to",
"a",
"power",
"."
] | def hermepow(c, pow, maxpower=16) :
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=c.dtype)
elif power == 1 :
return c
else :
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1) :
prd = hermemul(prd, c)
return prd | [
"def",
"hermepow",
"(",
"c",
",",
"pow",
",",
"maxpower",
"=",
"16",
")",
":",
"# c is a trimmed copy",
"[",
"c",
"]",
"=",
"pu",
".",
"as_series",
"(",
"[",
"c",
"]",
")",
"power",
"=",
"int",
"(",
"pow",
")",
"if",
"power",
"!=",
"pow",
"or",
"power",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Power must be a non-negative integer.\"",
")",
"elif",
"maxpower",
"is",
"not",
"None",
"and",
"power",
">",
"maxpower",
":",
"raise",
"ValueError",
"(",
"\"Power is too large\"",
")",
"elif",
"power",
"==",
"0",
":",
"return",
"np",
".",
"array",
"(",
"[",
"1",
"]",
",",
"dtype",
"=",
"c",
".",
"dtype",
")",
"elif",
"power",
"==",
"1",
":",
"return",
"c",
"else",
":",
"# This can be made more efficient by using powers of two",
"# in the usual way.",
"prd",
"=",
"c",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"power",
"+",
"1",
")",
":",
"prd",
"=",
"hermemul",
"(",
"prd",
",",
"c",
")",
"return",
"prd"
] | https://github.com/ApolloAuto/apollo-platform/blob/86d9dc6743b496ead18d597748ebabd34a513289/ros/third_party/lib_x86_64/python2.7/dist-packages/numpy/polynomial/hermite_e.py#L580-L631 |
||
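The hermepow body above notes that the loop "can be made more efficient by using powers of two." For concreteness, a sketch of that square-and-multiply variant with the same semantics for power >= 1 (hermemul is the series product from numpy.polynomial.hermite_e; this is not the library's implementation):

```python
from numpy.polynomial.hermite_e import hermemul

def hermepow_fast(c, power):
    """Square-and-multiply version of the loop in hermepow (power >= 1)."""
    result = None
    base = list(c)
    while power:
        if power & 1:                    # this bit contributes a factor
            result = base if result is None else hermemul(result, base)
        base = hermemul(base, base)      # square the base for the next bit
        power >>= 1
    return result

print(hermepow_fast([1, 2, 3], 2))  # matches hermepow([1, 2, 3], 2)
```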
psi4/psi4 | be533f7f426b6ccc263904e55122899b16663395 | psi4/driver/qcdb/libmintspointgrp.py | python | CharacterTable.nirrep | (self) | return self.PYnirrep | Returns the number of irreps. | Returns the number of irreps. | [
"Returns",
"the",
"number",
"of",
"irreps",
"."
] | def nirrep(self):
"""Returns the number of irreps."""
return self.PYnirrep | [
"def",
"nirrep",
"(",
"self",
")",
":",
"return",
"self",
".",
"PYnirrep"
] | https://github.com/psi4/psi4/blob/be533f7f426b6ccc263904e55122899b16663395/psi4/driver/qcdb/libmintspointgrp.py#L963-L965 |
|
weolar/miniblink49 | 1c4678db0594a4abde23d3ebbcc7cd13c3170777 | third_party/WebKit/Source/bindings/scripts/blink_idl_parser.py | python | BlinkIDLParser.p_StringLiteralList | (self, p) | StringLiteralList : StringLiteral ',' StringLiteralList
| StringLiteral | StringLiteralList : StringLiteral ',' StringLiteralList
| StringLiteral | [
"StringLiteralList",
":",
"StringLiteral",
"StringLiteralList",
"|",
"StringLiteral"
] | def p_StringLiteralList(self, p):
"""StringLiteralList : StringLiteral ',' StringLiteralList
| StringLiteral"""
def unwrap_string(ls):
"""Reach in and grab the string literal's "NAME"."""
return ls[1].value
if len(p) > 3:
p[0] = ListFromConcat(unwrap_string(p[1]), p[3])
else:
p[0] = ListFromConcat(unwrap_string(p[1])) | [
"def",
"p_StringLiteralList",
"(",
"self",
",",
"p",
")",
":",
"def",
"unwrap_string",
"(",
"ls",
")",
":",
"\"\"\"Reach in and grab the string literal's \"NAME\".\"\"\"",
"return",
"ls",
"[",
"1",
"]",
".",
"value",
"if",
"len",
"(",
"p",
")",
">",
"3",
":",
"p",
"[",
"0",
"]",
"=",
"ListFromConcat",
"(",
"unwrap_string",
"(",
"p",
"[",
"1",
"]",
")",
",",
"p",
"[",
"3",
"]",
")",
"else",
":",
"p",
"[",
"0",
"]",
"=",
"ListFromConcat",
"(",
"unwrap_string",
"(",
"p",
"[",
"1",
"]",
")",
")"
] | https://github.com/weolar/miniblink49/blob/1c4678db0594a4abde23d3ebbcc7cd13c3170777/third_party/WebKit/Source/bindings/scripts/blink_idl_parser.py#L367-L377 |
||
mantidproject/mantid | 03deeb89254ec4289edb8771e0188c2090a02f32 | qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/fourcircle_utility.py | python | get_det_xml_file_url | (server_url, instrument_name, exp_number, scan_number, pt_number) | return file_url | Get the URL to download the detector counts file in XML format
:param server_url:
:param instrument_name:
:param exp_number:
:param scan_number:
:param pt_number:
:return: | Get the URL to download the detector counts file in XML format
:param server_url:
:param instrument_name:
:param exp_number:
:param scan_number:
:param pt_number:
:return: | [
"Get",
"the",
"URL",
"to",
"download",
"the",
"detector",
"counts",
"file",
"in",
"XML",
"format",
":",
"param",
"server_url",
":",
":",
"param",
"instrument_name",
":",
":",
"param",
"exp_number",
":",
":",
"param",
"scan_number",
":",
":",
"param",
"pt_number",
":",
":",
"return",
":"
] | def get_det_xml_file_url(server_url, instrument_name, exp_number, scan_number, pt_number):
""" Get the URL to download the detector counts file in XML format
:param server_url:
:param instrument_name:
:param exp_number:
:param scan_number:
:param pt_number:
:return:
"""
assert isinstance(server_url, str) and isinstance(instrument_name, str)
assert isinstance(exp_number, int) and isinstance(scan_number, int) and isinstance(pt_number, int)
base_file_name = get_det_xml_file_name(instrument_name, exp_number, scan_number, pt_number)
file_url = '%s/exp%d/Datafiles/%s' % (server_url, exp_number, base_file_name)
return file_url | [
"def",
"get_det_xml_file_url",
"(",
"server_url",
",",
"instrument_name",
",",
"exp_number",
",",
"scan_number",
",",
"pt_number",
")",
":",
"assert",
"isinstance",
"(",
"server_url",
",",
"str",
")",
"and",
"isinstance",
"(",
"instrument_name",
",",
"str",
")",
"assert",
"isinstance",
"(",
"exp_number",
",",
"int",
")",
"and",
"isinstance",
"(",
"scan_number",
",",
"int",
")",
"and",
"isinstance",
"(",
"pt_number",
",",
"int",
")",
"base_file_name",
"=",
"get_det_xml_file_name",
"(",
"instrument_name",
",",
"exp_number",
",",
"scan_number",
",",
"pt_number",
")",
"file_url",
"=",
"'%s/exp%d/Datafiles/%s'",
"%",
"(",
"server_url",
",",
"exp_number",
",",
"base_file_name",
")",
"return",
"file_url"
] | https://github.com/mantidproject/mantid/blob/03deeb89254ec4289edb8771e0188c2090a02f32/qt/python/mantidqtinterfaces/mantidqtinterfaces/HFIR_4Circle_Reduction/fourcircle_utility.py#L393-L408 |
|
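Given the format string in its body, the URL helper above is easy to check in isolation, assuming its companion get_det_xml_file_name helper is importable alongside it; the server and experiment values here are made up for illustration:

```python
# Hypothetical inputs; only the URL layout comes from the record above.
url = get_det_xml_file_url("http://neutron.example.org", "HB3A", 355, 38, 11)
print(url)
# -> http://neutron.example.org/exp355/Datafiles/<name from get_det_xml_file_name>
```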
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/lib-tk/tkFileDialog.py | python | askopenfilename | (**options) | return Open(**options).show() | Ask for a filename to open | Ask for a filename to open | [
"Ask",
"for",
"a",
"filename",
"to",
"open"
] | def askopenfilename(**options):
"Ask for a filename to open"
return Open(**options).show() | [
"def",
"askopenfilename",
"(",
"*",
"*",
"options",
")",
":",
"return",
"Open",
"(",
"*",
"*",
"options",
")",
".",
"show",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/tkFileDialog.py#L122-L125 |
|
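A minimal sketch of the dialog helper above; this record is the Python 2 tkFileDialog module, and under Python 3 the same function lives in tkinter.filedialog:

```python
import tkinter as tk
from tkinter import filedialog

root = tk.Tk()
root.withdraw()  # hide the empty main window; we only want the dialog
path = filedialog.askopenfilename(
    title="Pick a file",
    filetypes=[("Text files", "*.txt"), ("All files", "*.*")],
)
print(path or "dialog was cancelled")
```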
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_gdi.py | python | PseudoDC.DrawEllipseRect | (*args, **kwargs) | return _gdi_.PseudoDC_DrawEllipseRect(*args, **kwargs) | DrawEllipseRect(self, Rect rect)
Draws an ellipse contained in the specified rectangle. The current pen
is used for the outline and the current brush for filling the shape. | DrawEllipseRect(self, Rect rect) | [
"DrawEllipseRect",
"(",
"self",
"Rect",
"rect",
")"
] | def DrawEllipseRect(*args, **kwargs):
"""
DrawEllipseRect(self, Rect rect)
Draws an ellipse contained in the specified rectangle. The current pen
is used for the outline and the current brush for filling the shape.
"""
return _gdi_.PseudoDC_DrawEllipseRect(*args, **kwargs) | [
"def",
"DrawEllipseRect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"PseudoDC_DrawEllipseRect",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L8025-L8032 |
|
CRYTEK/CRYENGINE | 232227c59a220cbbd311576f0fbeba7bb53b2a8c | Code/Tools/waf-1.7.13/waflib/Tools/ldc2.py | python | find_ldc2 | (conf) | Find the program *ldc2* and set the variable *D* | Find the program *ldc2* and set the variable *D* | [
"Find",
"the",
"program",
"*",
"ldc2",
"*",
"and",
"set",
"the",
"variable",
"*",
"D",
"*"
] | def find_ldc2(conf):
"""
Find the program *ldc2* and set the variable *D*
"""
conf.find_program(['ldc2'], var='D')
out = conf.cmd_and_log([conf.env.D, '-version'])
if out.find("based on DMD v2.") == -1:
conf.fatal("detected compiler is not ldc2") | [
"def",
"find_ldc2",
"(",
"conf",
")",
":",
"conf",
".",
"find_program",
"(",
"[",
"'ldc2'",
"]",
",",
"var",
"=",
"'D'",
")",
"out",
"=",
"conf",
".",
"cmd_and_log",
"(",
"[",
"conf",
".",
"env",
".",
"D",
",",
"'-version'",
"]",
")",
"if",
"out",
".",
"find",
"(",
"\"based on DMD v2.\"",
")",
"==",
"-",
"1",
":",
"conf",
".",
"fatal",
"(",
"\"detected compiler is not ldc2\"",
")"
] | https://github.com/CRYTEK/CRYENGINE/blob/232227c59a220cbbd311576f0fbeba7bb53b2a8c/Code/Tools/waf-1.7.13/waflib/Tools/ldc2.py#L10-L19 |
||
Samsung/veles | 95ed733c2e49bc011ad98ccf2416ecec23fbf352 | veles/external/freetype/__init__.py | python | Stroker.end_subpath | (self) | Close the current sub-path in the stroker.
**Note**:
You should call this function after 'begin_subpath'. If the subpath
was not 'opened', this function 'draws' a single line segment to the
start position when needed. | Close the current sub-path in the stroker. | [
"Close",
"the",
"current",
"sub",
"-",
"path",
"in",
"the",
"stroker",
"."
] | def end_subpath(self):
'''
Close the current sub-path in the stroker.
**Note**:
You should call this function after 'begin_subpath'. If the subpath
was not 'opened', this function 'draws' a single line segment to the
start position when needed.
'''
error = FT_Stroker_EndSubPath(self._FT_Stroker)
if error: raise FT_Exception(error) | [
"def",
"end_subpath",
"(",
"self",
")",
":",
"error",
"=",
"FT_Stroker_EndSubPath",
"(",
"self",
".",
"_FT_Stroker",
")",
"if",
"error",
":",
"raise",
"FT_Exception",
"(",
"error",
")"
] | https://github.com/Samsung/veles/blob/95ed733c2e49bc011ad98ccf2416ecec23fbf352/veles/external/freetype/__init__.py#L1800-L1811 |
||
grpc/grpc | 27bc6fe7797e43298dc931b96dc57322d0852a9f | tools/distrib/python/grpcio_tools/grpc_tools/protoc.py | python | main | (command_arguments) | return _protoc_compiler.run_main(command_arguments) | Run the protocol buffer compiler with the given command-line arguments.
Args:
command_arguments: a list of strings representing command line arguments to
`protoc`. | Run the protocol buffer compiler with the given command-line arguments. | [
"Run",
"the",
"protocol",
"buffer",
"compiler",
"with",
"the",
"given",
"command",
"-",
"line",
"arguments",
"."
] | def main(command_arguments):
"""Run the protocol buffer compiler with the given command-line arguments.
Args:
command_arguments: a list of strings representing command line arguments to
`protoc`.
"""
command_arguments = [argument.encode() for argument in command_arguments]
return _protoc_compiler.run_main(command_arguments) | [
"def",
"main",
"(",
"command_arguments",
")",
":",
"command_arguments",
"=",
"[",
"argument",
".",
"encode",
"(",
")",
"for",
"argument",
"in",
"command_arguments",
"]",
"return",
"_protoc_compiler",
".",
"run_main",
"(",
"command_arguments",
")"
] | https://github.com/grpc/grpc/blob/27bc6fe7797e43298dc931b96dc57322d0852a9f/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py#L29-L37 |
|
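The entry point above is commonly invoked programmatically instead of shelling out to protoc. A typical call, with placeholder file names:

```python
from grpc_tools import protoc

# argv[0] is conventional; include paths and outputs mirror the protoc CLI.
exit_code = protoc.main([
    "grpc_tools.protoc",
    "-I.",
    "--python_out=.",
    "--grpc_python_out=.",
    "helloworld.proto",   # placeholder .proto file
])
print("protoc exited with", exit_code)
```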
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Tools/Python/3.7.10/windows/Lib/tkinter/__init__.py | python | PhotoImage.get | (self, x, y) | return self.tk.call(self.name, 'get', x, y) | Return the color (red, green, blue) of the pixel at X,Y. | Return the color (red, green, blue) of the pixel at X,Y. | [
"Return",
"the",
"color",
"(",
"red",
"green",
"blue",
")",
"of",
"the",
"pixel",
"at",
"X",
"Y",
"."
] | def get(self, x, y):
"""Return the color (red, green, blue) of the pixel at X,Y."""
return self.tk.call(self.name, 'get', x, y) | [
"def",
"get",
"(",
"self",
",",
"x",
",",
"y",
")",
":",
"return",
"self",
".",
"tk",
".",
"call",
"(",
"self",
".",
"name",
",",
"'get'",
",",
"x",
",",
"y",
")"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Tools/Python/3.7.10/windows/Lib/tkinter/__init__.py#L3579-L3581 |
|
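A quick round trip for the accessor above, writing a pixel with put() and reading it back; put/get are standard tkinter PhotoImage methods, and the exact return type varies between Python versions (a tuple like the one shown on Python 3):

```python
import tkinter as tk

root = tk.Tk()
img = tk.PhotoImage(width=2, height=2)
img.put("#ff0000", to=(0, 0))        # paint the top-left pixel red
print(img.get(0, 0))                 # (255, 0, 0) on Python 3
root.destroy()
```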
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python/src/Lib/lib-tk/tkFileDialog.py | python | asksaveasfilename | (**options) | return SaveAs(**options).show() | Ask for a filename to save as | Ask for a filename to save as | [
"Ask",
"for",
"a",
"filename",
"to",
"save",
"as"
] | def asksaveasfilename(**options):
"Ask for a filename to save as"
return SaveAs(**options).show() | [
"def",
"asksaveasfilename",
"(",
"*",
"*",
"options",
")",
":",
"return",
"SaveAs",
"(",
"*",
"*",
"options",
")",
".",
"show",
"(",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python/src/Lib/lib-tk/tkFileDialog.py#L127-L130 |
|
cms-sw/cmssw | fd9de012d503d3405420bcbeec0ec879baa57cf2 | FWCore/ParameterSet/python/SequenceTypes.py | python | _Sequenceable.isOperation | (self) | return False | Returns True if the object is an operator (e.g. *,+ or !) type | Returns True if the object is an operator (e.g. *,+ or !) type | [
"Returns",
"True",
"if",
"the",
"object",
"is",
"an",
"operator",
"(",
"e",
".",
"g",
".",
"*",
"+",
"or",
"!",
")",
"type"
] | def isOperation(self):
"""Returns True if the object is an operator (e.g. *,+ or !) type"""
return False | [
"def",
"isOperation",
"(",
"self",
")",
":",
"return",
"False"
] | https://github.com/cms-sw/cmssw/blob/fd9de012d503d3405420bcbeec0ec879baa57cf2/FWCore/ParameterSet/python/SequenceTypes.py#L35-L37 |
|
thalium/icebox | 99d147d5b9269222225443ce171b4fd46d8985d4 | third_party/virtualbox/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py | python | SimpleParser.parseTagOpCopyTests | (self, sTag, aasSections, iTagLine, iEndLine) | return True | Tag: \@opcopytests
Value: <opstat | function> [..]
Example: \@opcopytests add_Eb_Gb
Trick to avoid duplicating tests for different encodings of the same
operation. | Tag: \@opcopytests
Value: <opstat | function> [..]
Example: \@opcopytests add_Eb_Gb | [
"Tag",
":",
"\\",
"@opcopytests",
"Value",
":",
"<opstat",
"|",
"function",
">",
"[",
"..",
"]",
"Example",
":",
"\\",
"@opcopytests",
"add_Eb_Gb"
] | def parseTagOpCopyTests(self, sTag, aasSections, iTagLine, iEndLine):
"""
Tag: \@opcopytests
Value: <opstat | function> [..]
Example: \@opcopytests add_Eb_Gb
Trick to avoid duplicating tests for different encodings of the same
operation.
"""
oInstr = self.ensureInstructionForOpTag(iTagLine);
# Flatten, validate and append the copy job to the instruction. We execute
# them after parsing all the input so we can handle forward references.
asToCopy = self.flattenAllSections(aasSections).split();
if not asToCopy:
return self.errorComment(iTagLine, '%s: requires at least one reference value' % (sTag,));
for sToCopy in asToCopy:
if sToCopy not in oInstr.asCopyTests:
if self.oReStatsName.match(sToCopy) or self.oReFunctionName.match(sToCopy):
oInstr.asCopyTests.append(sToCopy);
else:
self.errorComment(iTagLine, '%s: invalid instruction reference (opstat or function) "%s" (valid: %s or %s)'
% (sTag, sToCopy, self.oReStatsName.pattern, self.oReFunctionName.pattern));
else:
self.errorComment(iTagLine, '%s: ignoring duplicate "%s"' % (sTag, sToCopy,));
_ = iEndLine;
return True; | [
"def",
"parseTagOpCopyTests",
"(",
"self",
",",
"sTag",
",",
"aasSections",
",",
"iTagLine",
",",
"iEndLine",
")",
":",
"oInstr",
"=",
"self",
".",
"ensureInstructionForOpTag",
"(",
"iTagLine",
")",
"# Flatten, validate and append the copy job to the instruction. We execute",
"# them after parsing all the input so we can handle forward references.",
"asToCopy",
"=",
"self",
".",
"flattenAllSections",
"(",
"aasSections",
")",
".",
"split",
"(",
")",
"if",
"not",
"asToCopy",
":",
"return",
"self",
".",
"errorComment",
"(",
"iTagLine",
",",
"'%s: requires at least on reference value'",
"%",
"(",
"sTag",
",",
")",
")",
"for",
"sToCopy",
"in",
"asToCopy",
":",
"if",
"sToCopy",
"not",
"in",
"oInstr",
".",
"asCopyTests",
":",
"if",
"self",
".",
"oReStatsName",
".",
"match",
"(",
"sToCopy",
")",
"or",
"self",
".",
"oReFunctionName",
".",
"match",
"(",
"sToCopy",
")",
":",
"oInstr",
".",
"asCopyTests",
".",
"append",
"(",
"sToCopy",
")",
"else",
":",
"self",
".",
"errorComment",
"(",
"iTagLine",
",",
"'%s: invalid instruction reference (opstat or function) \"%s\" (valid: %s or %s)'",
"%",
"(",
"sTag",
",",
"sToCopy",
",",
"self",
".",
"oReStatsName",
".",
"pattern",
",",
"self",
".",
"oReFunctionName",
".",
"pattern",
")",
")",
"else",
":",
"self",
".",
"errorComment",
"(",
"iTagLine",
",",
"'%s: ignoring duplicate \"%s\"'",
"%",
"(",
"sTag",
",",
"sToCopy",
",",
")",
")",
"_",
"=",
"iEndLine",
"return",
"True"
] | https://github.com/thalium/icebox/blob/99d147d5b9269222225443ce171b4fd46d8985d4/third_party/virtualbox/src/VBox/VMM/VMMAll/IEMAllInstructionsPython.py#L2640-L2667 |
|
apple/turicreate | cce55aa5311300e3ce6af93cb45ba791fd1bdf49 | src/python/turicreate/toolkits/classifier/logistic_classifier.py | python | LogisticClassifier.predict_topk | (
self, dataset, output_type="probability", k=3, missing_value_action="auto"
) | return self.__proxy__.predict_topk(
dataset, missing_value_action, output_type, k
) | Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+--------+-------+-------------------+
| id | class | probability |
+--------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+--------+-------+-------------------+
[35688 rows x 3 columns] | Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model. | [
"Return",
"top",
"-",
"k",
"predictions",
"for",
"the",
"dataset",
"using",
"the",
"trained",
"model",
".",
"Predictions",
"are",
"returned",
"as",
"an",
"SFrame",
"with",
"three",
"columns",
":",
"id",
"class",
"and",
"probability",
"margin",
"or",
"rank",
"depending",
"on",
"the",
"output_type",
"parameter",
".",
"Input",
"dataset",
"size",
"must",
"be",
"the",
"same",
"as",
"for",
"training",
"of",
"the",
"model",
"."
] | def predict_topk(
self, dataset, output_type="probability", k=3, missing_value_action="auto"
):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+--------+-------+-------------------+
| id | class | probability |
+--------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+--------+-------+-------------------+
[35688 rows x 3 columns]
"""
_check_categorical_option_type(
"output_type", output_type, ["rank", "margin", "probability"]
)
_check_categorical_option_type(
"missing_value_action", missing_value_action, ["auto", "impute", "error"]
)
if missing_value_action == "auto":
missing_value_action = "impute"
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_predict_topk(
dataset, missing_value_action, output_type, k
)
if isinstance(dataset, dict):
return self.__proxy__.fast_predict_topk(
[dataset], missing_value_action, output_type, k
)
# Fast path
_raise_error_if_not_sframe(dataset, "dataset")
if missing_value_action == "auto":
missing_value_action = _sl.select_default_missing_value_policy(
self, "predict"
)
return self.__proxy__.predict_topk(
dataset, missing_value_action, output_type, k
) | [
"def",
"predict_topk",
"(",
"self",
",",
"dataset",
",",
"output_type",
"=",
"\"probability\"",
",",
"k",
"=",
"3",
",",
"missing_value_action",
"=",
"\"auto\"",
")",
":",
"_check_categorical_option_type",
"(",
"\"output_type\"",
",",
"output_type",
",",
"[",
"\"rank\"",
",",
"\"margin\"",
",",
"\"probability\"",
"]",
")",
"_check_categorical_option_type",
"(",
"\"missing_value_action\"",
",",
"missing_value_action",
",",
"[",
"\"auto\"",
",",
"\"impute\"",
",",
"\"error\"",
"]",
")",
"if",
"missing_value_action",
"==",
"\"auto\"",
":",
"missing_value_action",
"=",
"\"impute\"",
"# Low latency path",
"if",
"isinstance",
"(",
"dataset",
",",
"list",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"fast_predict_topk",
"(",
"dataset",
",",
"missing_value_action",
",",
"output_type",
",",
"k",
")",
"if",
"isinstance",
"(",
"dataset",
",",
"dict",
")",
":",
"return",
"self",
".",
"__proxy__",
".",
"fast_predict_topk",
"(",
"[",
"dataset",
"]",
",",
"missing_value_action",
",",
"output_type",
",",
"k",
")",
"# Fast path",
"_raise_error_if_not_sframe",
"(",
"dataset",
",",
"\"dataset\"",
")",
"if",
"missing_value_action",
"==",
"\"auto\"",
":",
"missing_value_action",
"=",
"_sl",
".",
"select_default_missing_value_policy",
"(",
"self",
",",
"\"predict\"",
")",
"return",
"self",
".",
"__proxy__",
".",
"predict_topk",
"(",
"dataset",
",",
"missing_value_action",
",",
"output_type",
",",
"k",
")"
] | https://github.com/apple/turicreate/blob/cce55aa5311300e3ce6af93cb45ba791fd1bdf49/src/python/turicreate/toolkits/classifier/logistic_classifier.py#L696-L790 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/msw/_controls.py | python | TreeCtrl.IsSelected | (*args, **kwargs) | return _controls_.TreeCtrl_IsSelected(*args, **kwargs) | IsSelected(self, TreeItemId item) -> bool | IsSelected(self, TreeItemId item) -> bool | [
"IsSelected",
"(",
"self",
"TreeItemId",
"item",
")",
"-",
">",
"bool"
] | def IsSelected(*args, **kwargs):
"""IsSelected(self, TreeItemId item) -> bool"""
return _controls_.TreeCtrl_IsSelected(*args, **kwargs) | [
"def",
"IsSelected",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_controls_",
".",
"TreeCtrl_IsSelected",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/msw/_controls.py#L5350-L5352 |
|
facebook/ThreatExchange | 31914a51820c73c8a0daffe62ccca29a6e3d359e | python-threatexchange/threatexchange/hashing/pdq_utils.py | python | simple_distance_binary | (bin_a, bin_b) | return sum(bin_a[i] != bin_b[i] for i in range(BITS_IN_PDQ)) | Returns the hamming distance of two binary strings. | Returns the hamming distance of two binary strings. | [
"Returns",
"the",
"hamming",
"distance",
"of",
"two",
"binary",
"strings",
"."
] | def simple_distance_binary(bin_a, bin_b):
"""
Returns the hamming distance of two binary strings.
"""
assert len(bin_a) == BITS_IN_PDQ
assert len(bin_b) == BITS_IN_PDQ
return sum(bin_a[i] != bin_b[i] for i in range(BITS_IN_PDQ)) | [
"def",
"simple_distance_binary",
"(",
"bin_a",
",",
"bin_b",
")",
":",
"assert",
"len",
"(",
"bin_a",
")",
"==",
"BITS_IN_PDQ",
"assert",
"len",
"(",
"bin_b",
")",
"==",
"BITS_IN_PDQ",
"return",
"sum",
"(",
"bin_a",
"[",
"i",
"]",
"!=",
"bin_b",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"BITS_IN_PDQ",
")",
")"
] | https://github.com/facebook/ThreatExchange/blob/31914a51820c73c8a0daffe62ccca29a6e3d359e/python-threatexchange/threatexchange/hashing/pdq_utils.py#L7-L13 |
|
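The per-character loop above walks all 256 positions in Python; the same distance can be computed with one integer XOR and a popcount. A sketch that is equivalent for well-formed '0'/'1' strings of PDQ length (the constant is assumed from the asserts in the record):

```python
BITS_IN_PDQ = 256  # assumed from the asserts in the record above

def hamming_distance_binary(bin_a: str, bin_b: str) -> int:
    """XOR the two bit strings as integers and count the differing bits."""
    assert len(bin_a) == len(bin_b) == BITS_IN_PDQ
    return bin(int(bin_a, 2) ^ int(bin_b, 2)).count("1")

a = "01" * 128
b = "00" * 128
print(hamming_distance_binary(a, b))  # 128 positions differ
```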
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | contrib/gizmos/osx_cocoa/gizmos.py | python | TreeListColumnInfo.SetSelectedImage | (*args, **kwargs) | return _gizmos.TreeListColumnInfo_SetSelectedImage(*args, **kwargs) | SetSelectedImage(self, int image) | SetSelectedImage(self, int image) | [
"SetSelectedImage",
"(",
"self",
"int",
"image",
")"
] | def SetSelectedImage(*args, **kwargs):
"""SetSelectedImage(self, int image)"""
return _gizmos.TreeListColumnInfo_SetSelectedImage(*args, **kwargs) | [
"def",
"SetSelectedImage",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gizmos",
".",
"TreeListColumnInfo_SetSelectedImage",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/contrib/gizmos/osx_cocoa/gizmos.py#L444-L446 |
|
catboost/catboost | 167f64f237114a4d10b2b4ee42adb4569137debe | contrib/tools/python3/src/Lib/turtle.py | python | TPen.resizemode | (self, rmode=None) | Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize" no adaption of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize' | Set resizemode to one of the values: "auto", "user", "noresize". | [
"Set",
"resizemode",
"to",
"one",
"of",
"the",
"values",
":",
"auto",
"user",
"noresize",
"."
] | def resizemode(self, rmode=None):
"""Set resizemode to one of the values: "auto", "user", "noresize".
(Optional) Argument:
rmode -- one of the strings "auto", "user", "noresize"
Different resizemodes have the following effects:
- "auto" adapts the appearance of the turtle
corresponding to the value of pensize.
- "user" adapts the appearance of the turtle according to the
values of stretchfactor and outlinewidth (outline),
which are set by shapesize()
- "noresize" no adaption of the turtle's appearance takes place.
If no argument is given, return current resizemode.
resizemode("user") is called by a call of shapesize with arguments.
Examples (for a Turtle instance named turtle):
>>> turtle.resizemode("noresize")
>>> turtle.resizemode()
'noresize'
"""
if rmode is None:
return self._resizemode
rmode = rmode.lower()
if rmode in ["auto", "user", "noresize"]:
self.pen(resizemode=rmode) | [
"def",
"resizemode",
"(",
"self",
",",
"rmode",
"=",
"None",
")",
":",
"if",
"rmode",
"is",
"None",
":",
"return",
"self",
".",
"_resizemode",
"rmode",
"=",
"rmode",
".",
"lower",
"(",
")",
"if",
"rmode",
"in",
"[",
"\"auto\"",
",",
"\"user\"",
",",
"\"noresize\"",
"]",
":",
"self",
".",
"pen",
"(",
"resizemode",
"=",
"rmode",
")"
] | https://github.com/catboost/catboost/blob/167f64f237114a4d10b2b4ee42adb4569137debe/contrib/tools/python3/src/Lib/turtle.py#L2045-L2071 |
||
SequoiaDB/SequoiaDB | 2894ed7e5bd6fe57330afc900cf76d0ff0df9f64 | tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py | python | uCSIsMathematicalOperators | (code) | return ret | Check whether the character is part of
MathematicalOperators UCS Block | Check whether the character is part of
MathematicalOperators UCS Block | [
"Check",
"whether",
"the",
"character",
"is",
"part",
"of",
"MathematicalOperators",
"UCS",
"Block"
] | def uCSIsMathematicalOperators(code):
"""Check whether the character is part of
MathematicalOperators UCS Block """
ret = libxml2mod.xmlUCSIsMathematicalOperators(code)
return ret | [
"def",
"uCSIsMathematicalOperators",
"(",
"code",
")",
":",
"ret",
"=",
"libxml2mod",
".",
"xmlUCSIsMathematicalOperators",
"(",
"code",
")",
"return",
"ret"
] | https://github.com/SequoiaDB/SequoiaDB/blob/2894ed7e5bd6fe57330afc900cf76d0ff0df9f64/tools/server/php_linux/libxml2/lib/python2.4/site-packages/libxml2.py#L2684-L2688 |
|
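A one-liner check of the block test above: U+2211 (N-ARY SUMMATION) falls inside the Mathematical Operators block U+2200-U+22FF, so the call should return nonzero, assuming the libxml2 bindings are importable:

```python
import libxml2

print(libxml2.uCSIsMathematicalOperators(0x2211))  # nonzero: inside the block
print(libxml2.uCSIsMathematicalOperators(0x0041))  # 0: 'A' is not in the block
```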
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_carbon/_core.py | python | PyApp.SetPrintMode | (*args, **kwargs) | return _core_.PyApp_SetPrintMode(*args, **kwargs) | SetPrintMode(self, int mode) | SetPrintMode(self, int mode) | [
"SetPrintMode",
"(",
"self",
"int",
"mode",
")"
] | def SetPrintMode(*args, **kwargs):
"""SetPrintMode(self, int mode)"""
return _core_.PyApp_SetPrintMode(*args, **kwargs) | [
"def",
"SetPrintMode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_core_",
".",
"PyApp_SetPrintMode",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_carbon/_core.py#L8107-L8109 |
|
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/gtk/aui.py | python | AuiToolBarEvent.SetDropDownClicked | (*args, **kwargs) | return _aui.AuiToolBarEvent_SetDropDownClicked(*args, **kwargs) | SetDropDownClicked(self, bool c) | SetDropDownClicked(self, bool c) | [
"SetDropDownClicked",
"(",
"self",
"bool",
"c",
")"
] | def SetDropDownClicked(*args, **kwargs):
"""SetDropDownClicked(self, bool c)"""
return _aui.AuiToolBarEvent_SetDropDownClicked(*args, **kwargs) | [
"def",
"SetDropDownClicked",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_aui",
".",
"AuiToolBarEvent_SetDropDownClicked",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/gtk/aui.py#L1685-L1687 |
|
benoitsteiner/tensorflow-opencl | cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5 | tensorflow/python/ops/resource_variable_ops.py | python | ResourceVariable.sparse_read | (self, indices, name=None) | return array_ops.identity(value) | Reads the value of this variable sparsely, using `gather`. | Reads the value of this variable sparsely, using `gather`. | [
"Reads",
"the",
"value",
"of",
"this",
"variable",
"sparsely",
"using",
"gather",
"."
] | def sparse_read(self, indices, name=None):
"""Reads the value of this variable sparsely, using `gather`."""
with ops.name_scope("Gather" if name is None else name) as name:
if self._trainable:
tape.watch_variable(self)
value = gen_resource_variable_ops.resource_gather(
self._handle, indices, dtype=self._dtype, name=name)
return array_ops.identity(value) | [
"def",
"sparse_read",
"(",
"self",
",",
"indices",
",",
"name",
"=",
"None",
")",
":",
"with",
"ops",
".",
"name_scope",
"(",
"\"Gather\"",
"if",
"name",
"is",
"None",
"else",
"name",
")",
"as",
"name",
":",
"if",
"self",
".",
"_trainable",
":",
"tape",
".",
"watch_variable",
"(",
"self",
")",
"value",
"=",
"gen_resource_variable_ops",
".",
"resource_gather",
"(",
"self",
".",
"_handle",
",",
"indices",
",",
"dtype",
"=",
"self",
".",
"_dtype",
",",
"name",
"=",
"name",
")",
"return",
"array_ops",
".",
"identity",
"(",
"value",
")"
] | https://github.com/benoitsteiner/tensorflow-opencl/blob/cb7cb40a57fde5cfd4731bc551e82a1e2fef43a5/tensorflow/python/ops/resource_variable_ops.py#L618-L625 |
|
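
sparse_read above is the gather-style variable read that enables sparse gradients: only the selected rows are materialized, and watching the variable lets the tape produce an IndexedSlices gradient instead of a dense one. A hedged sketch of the same behaviour through the public API, assuming TF 2.x eager execution (the internal gen_resource_variable_ops path in the row is bypassed):

import tensorflow as tf

v = tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])

with tf.GradientTape() as tape:
    rows = v.sparse_read([0, 2])      # same effect as tf.gather(v, [0, 2])
    loss = tf.reduce_sum(rows)

grad = tape.gradient(loss, v)
print(type(grad).__name__)            # IndexedSlices: a sparse gradient
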
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/__init__.py | python | IMetadataProvider.has_metadata | (name) | Does the package's distribution contain the named metadata? | Does the package's distribution contain the named metadata? | [
"Does",
"the",
"package",
"s",
"distribution",
"contain",
"the",
"named",
"metadata?"
] | def has_metadata(name):
"""Does the package's distribution contain the named metadata?""" | [
"def",
"has_metadata",
"(",
"name",
")",
":"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Lib/pkg_resources/__init__.py#L586-L587 |
||
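
has_metadata is one method of the IMetadataProvider protocol; concrete providers answer it from whatever store they wrap (a directory, a zip, an egg-info). A toy in-memory provider, written only to illustrate the protocol's shape — the class below is not part of pkg_resources:

class DictMetadataProvider:
    """Answers metadata queries from a plain dict (illustrative only)."""
    def __init__(self, metadata):
        self._metadata = dict(metadata)

    def has_metadata(self, name):
        return name in self._metadata

    def get_metadata(self, name):
        return self._metadata[name]

provider = DictMetadataProvider({"PKG-INFO": "Name: example\nVersion: 1.0"})
assert provider.has_metadata("PKG-INFO")
assert not provider.has_metadata("entry_points.txt")
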
miyosuda/TensorFlowAndroidMNIST | 7b5a4603d2780a8a2834575706e9001977524007 | jni-build/jni/include/tensorflow/models/rnn/translate/translate.py | python | create_model | (session, forward_only) | return model | Create translation model and initialize or load parameters in session. | Create translation model and initialize or load parameters in session. | [
"Create",
"translation",
"model",
"and",
"initialize",
"or",
"load",
"parameters",
"in",
"session",
"."
] | def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
model = seq2seq_model.Seq2SeqModel(
FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.initialize_all_variables())
return model | [
"def",
"create_model",
"(",
"session",
",",
"forward_only",
")",
":",
"model",
"=",
"seq2seq_model",
".",
"Seq2SeqModel",
"(",
"FLAGS",
".",
"en_vocab_size",
",",
"FLAGS",
".",
"fr_vocab_size",
",",
"_buckets",
",",
"FLAGS",
".",
"size",
",",
"FLAGS",
".",
"num_layers",
",",
"FLAGS",
".",
"max_gradient_norm",
",",
"FLAGS",
".",
"batch_size",
",",
"FLAGS",
".",
"learning_rate",
",",
"FLAGS",
".",
"learning_rate_decay_factor",
",",
"forward_only",
"=",
"forward_only",
")",
"ckpt",
"=",
"tf",
".",
"train",
".",
"get_checkpoint_state",
"(",
"FLAGS",
".",
"train_dir",
")",
"if",
"ckpt",
"and",
"tf",
".",
"gfile",
".",
"Exists",
"(",
"ckpt",
".",
"model_checkpoint_path",
")",
":",
"print",
"(",
"\"Reading model parameters from %s\"",
"%",
"ckpt",
".",
"model_checkpoint_path",
")",
"model",
".",
"saver",
".",
"restore",
"(",
"session",
",",
"ckpt",
".",
"model_checkpoint_path",
")",
"else",
":",
"print",
"(",
"\"Created model with fresh parameters.\"",
")",
"session",
".",
"run",
"(",
"tf",
".",
"initialize_all_variables",
"(",
")",
")",
"return",
"model"
] | https://github.com/miyosuda/TensorFlowAndroidMNIST/blob/7b5a4603d2780a8a2834575706e9001977524007/jni-build/jni/include/tensorflow/models/rnn/translate/translate.py#L115-L129 |
|
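
create_model follows the classic restore-or-initialize pattern: build the graph first, then load weights if the checkpoint directory has any, otherwise start from fresh parameters. The pattern in isolation, kept in the same TF 1.x-era API as the row above (note the tf.gfile.Exists test can fail for sharded V2 checkpoints, which is why later 1.x releases added tf.train.checkpoint_exists, and tf.initialize_all_variables was renamed tf.global_variables_initializer):

import tensorflow as tf

def restore_or_init(session, saver, train_dir):
    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
        # Resume from the latest checkpoint recorded in train_dir.
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        # No usable checkpoint: initialize all variables from scratch.
        session.run(tf.initialize_all_variables())
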
ChromiumWebApps/chromium | c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7 | tools/json_schema_compiler/model.py | python | Property.__init__ | (self, parent, name, json, namespace, origin) | Creates a Property from JSON. | Creates a Property from JSON. | [
"Creates",
"a",
"Property",
"from",
"JSON",
"."
] | def __init__(self, parent, name, json, namespace, origin):
"""Creates a Property from JSON.
"""
self.parent = parent
self.name = name
self._unix_name = UnixName(self.name)
self._unix_name_used = False
self.origin = origin
self.simple_name = _StripNamespace(self.name, namespace)
self.description = json.get('description', None)
self.optional = json.get('optional', None)
self.instance_of = json.get('isInstanceOf', None)
self.deprecated = json.get('deprecated')
# HACK: only support very specific value types.
is_allowed_value = (
'$ref' not in json and
('type' not in json or json['type'] == 'integer'
or json['type'] == 'string'))
self.value = None
if 'value' in json and is_allowed_value:
self.value = json['value']
if 'type' not in json:
# Sometimes the type of the value is left out, and we need to figure
# it out for ourselves.
if isinstance(self.value, int):
json['type'] = 'integer'
elif isinstance(self.value, basestring):
json['type'] = 'string'
else:
# TODO(kalman): support more types as necessary.
raise ParseException(
parent,
'"%s" is not a supported type for "value"' % type(self.value))
self.type_ = Type(parent, name, json, namespace, origin) | [
"def",
"__init__",
"(",
"self",
",",
"parent",
",",
"name",
",",
"json",
",",
"namespace",
",",
"origin",
")",
":",
"self",
".",
"parent",
"=",
"parent",
"self",
".",
"name",
"=",
"name",
"self",
".",
"_unix_name",
"=",
"UnixName",
"(",
"self",
".",
"name",
")",
"self",
".",
"_unix_name_used",
"=",
"False",
"self",
".",
"origin",
"=",
"origin",
"self",
".",
"simple_name",
"=",
"_StripNamespace",
"(",
"self",
".",
"name",
",",
"namespace",
")",
"self",
".",
"description",
"=",
"json",
".",
"get",
"(",
"'description'",
",",
"None",
")",
"self",
".",
"optional",
"=",
"json",
".",
"get",
"(",
"'optional'",
",",
"None",
")",
"self",
".",
"instance_of",
"=",
"json",
".",
"get",
"(",
"'isInstanceOf'",
",",
"None",
")",
"self",
".",
"deprecated",
"=",
"json",
".",
"get",
"(",
"'deprecated'",
")",
"# HACK: only support very specific value types.",
"is_allowed_value",
"=",
"(",
"'$ref'",
"not",
"in",
"json",
"and",
"(",
"'type'",
"not",
"in",
"json",
"or",
"json",
"[",
"'type'",
"]",
"==",
"'integer'",
"or",
"json",
"[",
"'type'",
"]",
"==",
"'string'",
")",
")",
"self",
".",
"value",
"=",
"None",
"if",
"'value'",
"in",
"json",
"and",
"is_allowed_value",
":",
"self",
".",
"value",
"=",
"json",
"[",
"'value'",
"]",
"if",
"'type'",
"not",
"in",
"json",
":",
"# Sometimes the type of the value is left out, and we need to figure",
"# it out for ourselves.",
"if",
"isinstance",
"(",
"self",
".",
"value",
",",
"int",
")",
":",
"json",
"[",
"'type'",
"]",
"=",
"'integer'",
"elif",
"isinstance",
"(",
"self",
".",
"value",
",",
"basestring",
")",
":",
"json",
"[",
"'type'",
"]",
"=",
"'string'",
"else",
":",
"# TODO(kalman): support more types as necessary.",
"raise",
"ParseException",
"(",
"parent",
",",
"'\"%s\" is not a supported type for \"value\"'",
"%",
"type",
"(",
"self",
".",
"value",
")",
")",
"self",
".",
"type_",
"=",
"Type",
"(",
"parent",
",",
"name",
",",
"json",
",",
"namespace",
",",
"origin",
")"
] | https://github.com/ChromiumWebApps/chromium/blob/c7361d39be8abd1574e6ce8957c8dbddd4c6ccf7/tools/json_schema_compiler/model.py#L338-L374 |
||
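
The "HACK" block above infers a missing schema "type" from the Python type of a fixed value, accepting only integers and strings. That inference rule, isolated as a standalone helper (names are mine, and str stands in for the original's Python 2 basestring):

def infer_value_type(value):
    """Schema 'type' for a fixed value, per the model's narrow rules."""
    if isinstance(value, int):
        return 'integer'
    if isinstance(value, str):
        return 'string'
    raise ValueError('"%r" is not a supported type for "value"' % (value,))

assert infer_value_type(42) == 'integer'
assert infer_value_type('tabs') == 'string'
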
mongodb/mongo | d8ff665343ad29cf286ee2cf4a1960d29371937b | buildscripts/ciconfig/evergreen.py | python | Task.tags | (self) | return self._tags | Get a set of tags this task has been marked with. | Get a set of tags this task has been marked with. | [
"Get",
"a",
"set",
"of",
"tags",
"this",
"task",
"has",
"been",
"marked",
"with",
"."
] | def tags(self):
"""Get a set of tags this task has been marked with."""
if self._tags is None:
self._tags = set(self.raw.get("tags", []))
return self._tags | [
"def",
"tags",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tags",
"is",
"None",
":",
"self",
".",
"_tags",
"=",
"set",
"(",
"self",
".",
"raw",
".",
"get",
"(",
"\"tags\"",
",",
"[",
"]",
")",
")",
"return",
"self",
".",
"_tags"
] | https://github.com/mongodb/mongo/blob/d8ff665343ad29cf286ee2cf4a1960d29371937b/buildscripts/ciconfig/evergreen.py#L205-L209 |
|
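
tags above is a hand-rolled memoized property: the set is built from the raw config on first access and cached on the instance afterwards. On Python 3.8+ the same shape collapses to functools.cached_property; a sketch of the equivalence (the Task class below is a stand-in, not the evergreen one):

from functools import cached_property

class Task:
    def __init__(self, raw):
        self.raw = raw

    @cached_property
    def tags(self):
        # Computed once, then stored on the instance under the same name.
        return set(self.raw.get("tags", []))

task = Task({"tags": ["nightly", "requires_large_host"]})
assert task.tags == {"nightly", "requires_large_host"}
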
PaddlePaddle/Paddle | 1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c | python/paddle/distributed/collective.py | python | reduce | (tensor, dst, op=ReduceOp.SUM, group=None, use_calc_stream=True) | | Reduce a tensor to the destination from all others. As shown below, 4 GPUs each start 4 processes and the data on each GPU is represented
by the GPU number. The destination of the reduce operator is GPU0 and the operation is sum. Through the reduce operator,
GPU0 will own the sum of all data from all GPUs.
.. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/reduce.png
:width: 800
:alt: reduce
:align: center
Args:
tensor (Tensor): The output Tensor for the destination and the input Tensor otherwise. Its data type
should be float16, float32, float64, int32 or int64.
dst (int): The destination rank id.
op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD): Optional. The operation used. Default value is ReduceOp.SUM.
group (Group): The group instance returned by new_group, or None for the global default group.
use_calc_stream (bool): Whether to use the calculation stream (True) or the communication stream (False).
Defaults to True.
Returns:
None.
Examples:
.. code-block:: python
# required: distributed
import numpy as np
import paddle
from paddle.distributed import init_parallel_env
paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
init_parallel_env()
if paddle.distributed.ParallelEnv().local_rank == 0:
np_data = np.array([[4, 5, 6], [4, 5, 6]])
else:
np_data = np.array([[1, 2, 3], [1, 2, 3]])
data = paddle.to_tensor(np_data)
paddle.distributed.reduce(data, 0)
out = data.numpy()
# [[5, 7, 9], [5, 7, 9]] | [] | def reduce(tensor, dst, op=ReduceOp.SUM, group=None, use_calc_stream=True):
"""
Reduce a tensor to the destination from all others. As shown below, 4 GPUs each start 4 processes and the data on each GPU is represented
by the GPU number. The destination of the reduce operator is GPU0 and the operation is sum. Through the reduce operator,
GPU0 will own the sum of all data from all GPUs.
.. image:: https://githubraw.cdn.bcebos.com/PaddlePaddle/docs/develop/docs/api/paddle/distributed/img/reduce.png
:width: 800
:alt: reduce
:align: center
Args:
tensor (Tensor): The output Tensor for the destination and the input Tensor otherwise. Its data type
should be float16, float32, float64, int32 or int64.
dst (int): The destination rank id.
op (ReduceOp.SUM|ReduceOp.MAX|ReduceOp.MIN|ReduceOp.PROD): Optional. The operation used. Default value is ReduceOp.SUM.
group (Group): The group instance returned by new_group, or None for the global default group.
use_calc_stream (bool): Whether to use the calculation stream (True) or the communication stream (False).
Defaults to True.
Returns:
None.
Examples:
.. code-block:: python
# required: distributed
import numpy as np
import paddle
from paddle.distributed import init_parallel_env
paddle.set_device('gpu:%d'%paddle.distributed.ParallelEnv().dev_id)
init_parallel_env()
if paddle.distributed.ParallelEnv().local_rank == 0:
np_data = np.array([[4, 5, 6], [4, 5, 6]])
else:
np_data = np.array([[1, 2, 3], [1, 2, 3]])
data = paddle.to_tensor(np_data)
paddle.distributed.reduce(data, 0)
out = data.numpy()
# [[5, 7, 9], [5, 7, 9]]
"""
if group is not None and not group.is_member():
return
if not isinstance(dst, int):
raise ValueError("dst should be int.")
ring_id = 0 if group is None else group.id
gdst = dst if group is None else group.get_group_rank(dst)
assert gdst >= 0, ("dst rank out of group, need global rank")
if in_dygraph_mode():
if op == ReduceOp.SUM:
return _C_ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
elif op == ReduceOp.MAX:
return _C_ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
elif op == ReduceOp.MIN:
return _C_ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
elif op == ReduceOp.PROD:
return _C_ops.c_reduce_prod(tensor, tensor, 'use_calc_stream',
use_calc_stream, 'ring_id', ring_id,
'root_id', gdst)
else:
raise ValueError("Unknown parameter: {}.".format(op))
op_type = 'c_reduce'
check_variable_and_dtype(
tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
'all_reduce')
if not op in [ReduceOp.SUM, ReduceOp.MAX, ReduceOp.MIN, ReduceOp.PROD]:
raise ValueError("The op for reduce must be one of educeOp.PROD, "
"ReduceOp.SUM, ReduceOp.MAX, ReduceOp.MIN.")
if op == ReduceOp.SUM:
op_type = 'c_reduce_sum'
elif op == ReduceOp.MAX:
op_type = 'c_reduce_max'
elif op == ReduceOp.MIN:
op_type = 'c_reduce_min'
elif op == ReduceOp.PROD:
op_type = 'c_reduce_prod'
helper = LayerHelper(op_type, **locals())
helper.append_op(
type=op_type,
inputs={'X': [tensor]},
outputs={'Out': [tensor]},
attrs={
'ring_id': ring_id,
'use_calc_stream': use_calc_stream,
'root_id': gdst,
}) | [
"def",
"reduce",
"(",
"tensor",
",",
"dst",
",",
"op",
"=",
"ReduceOp",
".",
"SUM",
",",
"group",
"=",
"None",
",",
"use_calc_stream",
"=",
"True",
")",
":",
"if",
"group",
"is",
"not",
"None",
"and",
"not",
"group",
".",
"is_member",
"(",
")",
":",
"return",
"if",
"not",
"isinstance",
"(",
"dst",
",",
"int",
")",
":",
"raise",
"ValueError",
"(",
"\"dst should be int.\"",
")",
"ring_id",
"=",
"0",
"if",
"group",
"is",
"None",
"else",
"group",
".",
"id",
"gdst",
"=",
"dst",
"if",
"group",
"is",
"None",
"else",
"group",
".",
"get_group_rank",
"(",
"dst",
")",
"assert",
"gdst",
">=",
"0",
",",
"(",
"\"dst rank out of group, need global rank\"",
")",
"if",
"in_dygraph_mode",
"(",
")",
":",
"if",
"op",
"==",
"ReduceOp",
".",
"SUM",
":",
"return",
"_C_ops",
".",
"c_reduce_sum",
"(",
"tensor",
",",
"tensor",
",",
"'use_calc_stream'",
",",
"use_calc_stream",
",",
"'ring_id'",
",",
"ring_id",
",",
"'root_id'",
",",
"gdst",
")",
"elif",
"op",
"==",
"ReduceOp",
".",
"MAX",
":",
"return",
"_C_ops",
".",
"c_reduce_max",
"(",
"tensor",
",",
"tensor",
",",
"'use_calc_stream'",
",",
"use_calc_stream",
",",
"'ring_id'",
",",
"ring_id",
",",
"'root_id'",
",",
"gdst",
")",
"elif",
"op",
"==",
"ReduceOp",
".",
"MIN",
":",
"return",
"_C_ops",
".",
"c_reduce_min",
"(",
"tensor",
",",
"tensor",
",",
"'use_calc_stream'",
",",
"use_calc_stream",
",",
"'ring_id'",
",",
"ring_id",
",",
"'root_id'",
",",
"gdst",
")",
"elif",
"op",
"==",
"ReduceOp",
".",
"PROD",
":",
"return",
"_C_ops",
".",
"c_reduce_prod",
"(",
"tensor",
",",
"tensor",
",",
"'use_calc_stream'",
",",
"use_calc_stream",
",",
"'ring_id'",
",",
"ring_id",
",",
"'root_id'",
",",
"gdst",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknown parameter: {}.\"",
".",
"format",
"(",
"op",
")",
")",
"op_type",
"=",
"'c_reduce'",
"check_variable_and_dtype",
"(",
"tensor",
",",
"'tensor'",
",",
"[",
"'float16'",
",",
"'float32'",
",",
"'float64'",
",",
"'int32'",
",",
"'int64'",
"]",
",",
"'all_reduce'",
")",
"if",
"not",
"op",
"in",
"[",
"ReduceOp",
".",
"SUM",
",",
"ReduceOp",
".",
"MAX",
",",
"ReduceOp",
".",
"MIN",
",",
"ReduceOp",
".",
"PROD",
"]",
":",
"raise",
"ValueError",
"(",
"\"The op for reduce must be one of educeOp.PROD, \"",
"\"ReduceOp.SUM, ReduceOp.MAX, ReduceOp.MIN.\"",
")",
"if",
"op",
"==",
"ReduceOp",
".",
"SUM",
":",
"op_type",
"=",
"'c_reduce_sum'",
"elif",
"op",
"==",
"ReduceOp",
".",
"MAX",
":",
"op_type",
"=",
"'c_reduce_max'",
"elif",
"op",
"==",
"ReduceOp",
".",
"MIN",
":",
"op_type",
"=",
"'c_reduce_min'",
"elif",
"op",
"==",
"ReduceOp",
".",
"PROD",
":",
"op_type",
"=",
"'c_reduce_prod'",
"helper",
"=",
"LayerHelper",
"(",
"op_type",
",",
"*",
"*",
"locals",
"(",
")",
")",
"helper",
".",
"append_op",
"(",
"type",
"=",
"op_type",
",",
"inputs",
"=",
"{",
"'X'",
":",
"[",
"tensor",
"]",
"}",
",",
"outputs",
"=",
"{",
"'Out'",
":",
"[",
"tensor",
"]",
"}",
",",
"attrs",
"=",
"{",
"'ring_id'",
":",
"ring_id",
",",
"'use_calc_stream'",
":",
"use_calc_stream",
",",
"'root_id'",
":",
"gdst",
",",
"}",
")"
] | https://github.com/PaddlePaddle/Paddle/blob/1252f4bb3e574df80aa6d18c7ddae1b3a90bd81c/python/paddle/distributed/collective.py#L516-L615 |
|||
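
Semantically, reduce combines one tensor per rank with the chosen op and leaves the result only on dst; every other rank keeps its input unchanged (all_reduce, by contrast, leaves the result on every rank). A host-side NumPy illustration of that contract — a simulation of the semantics only, not of the device-stream operator:

import numpy as np

def simulate_reduce(per_rank, dst, op=np.add):
    """Return what each rank holds after reduce(..., dst) with the given op."""
    total = per_rank[0].copy()
    for t in per_rank[1:]:
        total = op(total, t)
    return [total if r == dst else t.copy() for r, t in enumerate(per_rank)]

data = [np.array([[4, 5, 6], [4, 5, 6]]),
        np.array([[1, 2, 3], [1, 2, 3]])]
print(simulate_reduce(data, dst=0)[0])   # [[5 7 9] [5 7 9]], matching the docstring
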
mindspore-ai/mindspore | fb8fd3338605bb34fa5cea054e535a8b1d753fab | mindspore/python/mindspore/numpy/math_ops.py | python | dot | (a, b) | return res | Returns the dot product of two arrays.
Specifically,
If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
If both `a` and `b` are 2-D arrays, it is matrix multiplication.
If either `a` or `b` is 0-D (scalar), it is equivalent to multiply.
If `a` is an `N-D` array and `b` is a 1-D array, it is a sum product
over the last axis of `a` and `b`.
If `a` is an `N-D` array and `b` is an `M-D` array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`:
``dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])``
Note:
Numpy argument `out` is not supported.
On GPU, the supported dtypes are np.float16, and np.float32.
On CPU, the supported dtypes are np.float16, np.float32, and
np.float64.
Args:
a (Tensor): input tensor
b (Tensor): input tensor
Returns:
Tensor or scalar, the dot product of `a` and `b`. If `a` and `b` are
both scalars or both 1-D arrays then a scalar is returned;
otherwise an array is returned
Raises:
ValueError: If the last dimension of `a` is not the same size
as the second-to-last dimension of `b`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> a = np.full((1, 3), 7).astype('float32')
>>> b = np.full((2, 3, 4), 5).astype('float32')
>>> output = np.dot(a, b)
>>> print(output)
[[[105. 105. 105. 105.]
[105. 105. 105. 105.]]] | Returns the dot product of two arrays. | [
"Returns",
"the",
"dot",
"product",
"of",
"two",
"arrays",
"."
] | def dot(a, b):
"""
Returns the dot product of two arrays.
Specifically,
If both `a` and `b` are 1-D arrays, it is inner product of vectors
(without complex conjugation).
If both `a` and `b` are 2-D arrays, it is matrix multiplication.
If either `a` or `b` is 0-D (scalar), it is equivalent to multiply.
If `a` is an `N-D` array and `b` is a 1-D array, it is a sum product
over the last axis of `a` and `b`.
If `a` is an `N-D` array and `b` is an `M-D` array (where ``M>=2``), it is a
sum product over the last axis of `a` and the second-to-last axis of `b`:
``dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])``
Note:
Numpy argument `out` is not supported.
On GPU, the supported dtypes are np.float16, and np.float32.
On CPU, the supported dtypes are np.float16, np.float32, and
np.float64.
Args:
a (Tensor): input tensor
b (Tensor): input tensor
Returns:
Tensor or scalar, the dot product of `a` and `b`. If `a` and `b` are
both scalars or both 1-D arrays then a scalar is returned;
otherwise an array is returned
Raises:
ValueError: If the last dimension of `a` is not the same size
as the second-to-last dimension of `b`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore.numpy as np
>>> a = np.full((1, 3), 7).astype('float32')
>>> b = np.full((2, 3, 4), 5).astype('float32')
>>> output = np.dot(a, b)
>>> print(output)
[[[105. 105. 105. 105.]
[105. 105. 105. 105.]]]
"""
ndim_a, ndim_b = F.rank(a), F.rank(b)
if ndim_a == 0 or ndim_b == 0:
return F.tensor_mul(a, b)
if ndim_a > 0 and ndim_b >= 2:
perm = F.make_range(ndim_b)
perm = perm[:-2] + (perm[-1],) + (perm[-2],)
b = F.transpose(b, perm)
if F.shape(a)[-1] != F.shape(b)[-1]:
_raise_value_error('shapes are not aligned')
a_aligned = F.reshape(a, (-1, F.shape(a)[-1]))
b_aligned = F.reshape(b, (-1, F.shape(b)[-1]))
res = _matmul_t(a_aligned, b_aligned)
res = F.reshape(res, F.shape(a)[:-1] + F.shape(b)[:-1])
return res | [
"def",
"dot",
"(",
"a",
",",
"b",
")",
":",
"ndim_a",
",",
"ndim_b",
"=",
"F",
".",
"rank",
"(",
"a",
")",
",",
"F",
".",
"rank",
"(",
"b",
")",
"if",
"ndim_a",
"==",
"0",
"or",
"ndim_b",
"==",
"0",
":",
"return",
"F",
".",
"tensor_mul",
"(",
"a",
",",
"b",
")",
"if",
"ndim_a",
">",
"0",
"and",
"ndim_b",
">=",
"2",
":",
"perm",
"=",
"F",
".",
"make_range",
"(",
"ndim_b",
")",
"perm",
"=",
"perm",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"perm",
"[",
"-",
"1",
"]",
",",
")",
"+",
"(",
"perm",
"[",
"-",
"2",
"]",
",",
")",
"b",
"=",
"F",
".",
"transpose",
"(",
"b",
",",
"perm",
")",
"if",
"F",
".",
"shape",
"(",
"a",
")",
"[",
"-",
"1",
"]",
"!=",
"F",
".",
"shape",
"(",
"b",
")",
"[",
"-",
"1",
"]",
":",
"_raise_value_error",
"(",
"'shapes are not aligned'",
")",
"a_aligned",
"=",
"F",
".",
"reshape",
"(",
"a",
",",
"(",
"-",
"1",
",",
"F",
".",
"shape",
"(",
"a",
")",
"[",
"-",
"1",
"]",
")",
")",
"b_aligned",
"=",
"F",
".",
"reshape",
"(",
"b",
",",
"(",
"-",
"1",
",",
"F",
".",
"shape",
"(",
"b",
")",
"[",
"-",
"1",
"]",
")",
")",
"res",
"=",
"_matmul_t",
"(",
"a_aligned",
",",
"b_aligned",
")",
"res",
"=",
"F",
".",
"reshape",
"(",
"res",
",",
"F",
".",
"shape",
"(",
"a",
")",
"[",
":",
"-",
"1",
"]",
"+",
"F",
".",
"shape",
"(",
"b",
")",
"[",
":",
"-",
"1",
"]",
")",
"return",
"res"
] | https://github.com/mindspore-ai/mindspore/blob/fb8fd3338605bb34fa5cea054e535a8b1d753fab/mindspore/python/mindspore/numpy/math_ops.py#L693-L754 |
|
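
For the N-D x M-D case the body above reduces dot to a single 2-D matmul: swap b's last two axes so the contraction axis is last, flatten both operands over that axis, multiply against the transpose, and reshape back. The same trick in plain NumPy, checked against np.dot (illustrative only — NumPy's dot does this natively):

import numpy as np

def dot_via_flat_matmul(a, b):
    """np.dot semantics for ndim >= 2 via one flattened matmul (sketch)."""
    b_t = np.swapaxes(b, -1, -2)          # contraction axis moves last
    a2 = a.reshape(-1, a.shape[-1])
    b2 = b_t.reshape(-1, b_t.shape[-1])
    out = a2 @ b2.T                        # contract the shared last axis
    return out.reshape(a.shape[:-1] + b_t.shape[:-1])

a = np.full((1, 3), 7.0)
b = np.full((2, 3, 4), 5.0)
assert np.allclose(dot_via_flat_matmul(a, b), np.dot(a, b))   # all 105s
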
wxWidgets/wxPython-Classic | 19571e1ae65f1ac445f5491474121998c97a1bf0 | src/osx_cocoa/_gdi.py | python | StockGDI.GetFont | (*args, **kwargs) | return _gdi_.StockGDI_GetFont(*args, **kwargs) | GetFont(self, int item) -> Font | GetFont(self, int item) -> Font | [
"GetFont",
"(",
"self",
"int",
"item",
")",
"-",
">",
"Font"
] | def GetFont(*args, **kwargs):
"""GetFont(self, int item) -> Font"""
return _gdi_.StockGDI_GetFont(*args, **kwargs) | [
"def",
"GetFont",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_gdi_",
".",
"StockGDI_GetFont",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | https://github.com/wxWidgets/wxPython-Classic/blob/19571e1ae65f1ac445f5491474121998c97a1bf0/src/osx_cocoa/_gdi.py#L6887-L6889 |
|
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py | python | ParserElement.__rxor__ | (self, other ) | return other ^ self | Implementation of ^ operator when left operand is not a C{L{ParserElement}} | Implementation of ^ operator when left operand is not a C{L{ParserElement}} | [
"Implementation",
"of",
"^",
"operator",
"when",
"left",
"operand",
"is",
"not",
"a",
"C",
"{",
"L",
"{",
"ParserElement",
"}}"
] | def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self | [
"def",
"__rxor__",
"(",
"self",
",",
"other",
")",
":",
"if",
"isinstance",
"(",
"other",
",",
"basestring",
")",
":",
"other",
"=",
"ParserElement",
".",
"_literalStringClass",
"(",
"other",
")",
"if",
"not",
"isinstance",
"(",
"other",
",",
"ParserElement",
")",
":",
"warnings",
".",
"warn",
"(",
"\"Cannot combine element of type %s with ParserElement\"",
"%",
"type",
"(",
"other",
")",
",",
"SyntaxWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"None",
"return",
"other",
"^",
"self"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/common-code/Lib/pkg_resources/_vendor/pyparsing.py#L1985-L1995 |
|
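
__rxor__ is the reflected form of ^: Python falls back to it when the left operand (typically a plain string) does not know how to combine with a ParserElement, so the method coerces the string to a literal and re-dispatches as other ^ self. The general reflected-operator shape, stripped of the pyparsing specifics:

class Expr:
    def __init__(self, value):
        self.value = value

    def __xor__(self, other):
        # "a ^ b" where a is already an Expr: coerce b if needed.
        other = other if isinstance(other, Expr) else Expr(other)
        return Expr((self.value, "^", other.value))

    def __rxor__(self, other):
        # "a ^ b" where a is a foreign type: coerce a, then re-dispatch.
        return Expr(other) ^ self

print(("start" ^ Expr("end")).value)   # ('start', '^', 'end')
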
zhaoweicai/mscnn | 534bcac5710a579d60827f192035f7eef6d8c585 | python/caffe/pycaffe.py | python | _Net_forward_backward_all | (self, blobs=None, diffs=None, **kwargs) | return all_outs, all_diffs | Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict. | Run net forward + backward in batches. | [
"Run",
"net",
"forward",
"+",
"backward",
"in",
"batches",
"."
] | def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
Prefilled variants are called for lack of input or output blobs.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in six.iteritems(batch_blobs):
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in six.iteritems(batch_diffs):
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = np.asarray(all_outs[out])
all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out, diff in zip(all_outs, all_diffs):
all_outs[out] = all_outs[out][:-pad]
all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs | [
"def",
"_Net_forward_backward_all",
"(",
"self",
",",
"blobs",
"=",
"None",
",",
"diffs",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Batch blobs and diffs.",
"all_outs",
"=",
"{",
"out",
":",
"[",
"]",
"for",
"out",
"in",
"set",
"(",
"self",
".",
"outputs",
"+",
"(",
"blobs",
"or",
"[",
"]",
")",
")",
"}",
"all_diffs",
"=",
"{",
"diff",
":",
"[",
"]",
"for",
"diff",
"in",
"set",
"(",
"self",
".",
"inputs",
"+",
"(",
"diffs",
"or",
"[",
"]",
")",
")",
"}",
"forward_batches",
"=",
"self",
".",
"_batch",
"(",
"{",
"in_",
":",
"kwargs",
"[",
"in_",
"]",
"for",
"in_",
"in",
"self",
".",
"inputs",
"if",
"in_",
"in",
"kwargs",
"}",
")",
"backward_batches",
"=",
"self",
".",
"_batch",
"(",
"{",
"out",
":",
"kwargs",
"[",
"out",
"]",
"for",
"out",
"in",
"self",
".",
"outputs",
"if",
"out",
"in",
"kwargs",
"}",
")",
"# Collect outputs from batches (and heed lack of forward/backward batches).",
"for",
"fb",
",",
"bb",
"in",
"izip_longest",
"(",
"forward_batches",
",",
"backward_batches",
",",
"fillvalue",
"=",
"{",
"}",
")",
":",
"batch_blobs",
"=",
"self",
".",
"forward",
"(",
"blobs",
"=",
"blobs",
",",
"*",
"*",
"fb",
")",
"batch_diffs",
"=",
"self",
".",
"backward",
"(",
"diffs",
"=",
"diffs",
",",
"*",
"*",
"bb",
")",
"for",
"out",
",",
"out_blobs",
"in",
"six",
".",
"iteritems",
"(",
"batch_blobs",
")",
":",
"all_outs",
"[",
"out",
"]",
".",
"extend",
"(",
"out_blobs",
".",
"copy",
"(",
")",
")",
"for",
"diff",
",",
"out_diffs",
"in",
"six",
".",
"iteritems",
"(",
"batch_diffs",
")",
":",
"all_diffs",
"[",
"diff",
"]",
".",
"extend",
"(",
"out_diffs",
".",
"copy",
"(",
")",
")",
"# Package in ndarray.",
"for",
"out",
",",
"diff",
"in",
"zip",
"(",
"all_outs",
",",
"all_diffs",
")",
":",
"all_outs",
"[",
"out",
"]",
"=",
"np",
".",
"asarray",
"(",
"all_outs",
"[",
"out",
"]",
")",
"all_diffs",
"[",
"diff",
"]",
"=",
"np",
".",
"asarray",
"(",
"all_diffs",
"[",
"diff",
"]",
")",
"# Discard padding at the end and package in ndarray.",
"pad",
"=",
"len",
"(",
"six",
".",
"next",
"(",
"six",
".",
"itervalues",
"(",
"all_outs",
")",
")",
")",
"-",
"len",
"(",
"six",
".",
"next",
"(",
"six",
".",
"itervalues",
"(",
"kwargs",
")",
")",
")",
"if",
"pad",
":",
"for",
"out",
",",
"diff",
"in",
"zip",
"(",
"all_outs",
",",
"all_diffs",
")",
":",
"all_outs",
"[",
"out",
"]",
"=",
"all_outs",
"[",
"out",
"]",
"[",
":",
"-",
"pad",
"]",
"all_diffs",
"[",
"diff",
"]",
"=",
"all_diffs",
"[",
"diff",
"]",
"[",
":",
"-",
"pad",
"]",
"return",
"all_outs",
",",
"all_diffs"
] | https://github.com/zhaoweicai/mscnn/blob/534bcac5710a579d60827f192035f7eef6d8c585/python/caffe/pycaffe.py#L206-L248 |
|
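
The padding bookkeeping at the end of _Net_forward_backward_all exists because _batch rounds the inputs up to whole batches; after the per-batch outputs are concatenated, the surplus rows are sliced off so outputs align one-to-one with inputs. That trimming step in isolation:

import numpy as np

def drop_batch_padding(stacked, n_requested):
    """Trim rows that exist only because inputs were padded to a full batch."""
    pad = len(stacked) - n_requested
    return stacked[:-pad] if pad else stacked

outs = np.zeros((12, 10))                  # e.g. 10 inputs run as 3 batches of 4
print(drop_batch_padding(outs, 10).shape)  # (10, 10)
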
PX4/PX4-Autopilot | 0b9f60a0370be53d683352c63fd92db3d6586e18 | Tools/mavlink_px4.py | python | MAVLink.send | (self, mavmsg) | send a MAVLink message | send a MAVLink message | [
"send",
"a",
"MAVLink",
"message"
] | def send(self, mavmsg):
'''send a MAVLink message'''
buf = mavmsg.pack(self)
self.file.write(buf)
self.seq = (self.seq + 1) % 255
self.total_packets_sent += 1
self.total_bytes_sent += len(buf) | [
"def",
"send",
"(",
"self",
",",
"mavmsg",
")",
":",
"buf",
"=",
"mavmsg",
".",
"pack",
"(",
"self",
")",
"self",
".",
"file",
".",
"write",
"(",
"buf",
")",
"self",
".",
"seq",
"=",
"(",
"self",
".",
"seq",
"+",
"1",
")",
"%",
"255",
"self",
".",
"total_packets_sent",
"+=",
"1",
"self",
".",
"total_bytes_sent",
"+=",
"len",
"(",
"buf",
")"
] | https://github.com/PX4/PX4-Autopilot/blob/0b9f60a0370be53d683352c63fd92db3d6586e18/Tools/mavlink_px4.py#L2259-L2265 |
||
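
send is pack-then-write plus bookkeeping. Note the counter wraps with % 255, so this generated code never emits sequence number 255 even though the field is a full byte; % 256 would cover the whole range, and whether the narrower wrap was intentional is not documented here. The same send-side shape with the full-range wrap (a generic sketch, not MAVLink-specific):

import io

class Framer:
    """Send-side bookkeeping around a pack/write pipeline."""
    def __init__(self, stream):
        self.stream = stream
        self.seq = 0
        self.total_packets_sent = 0
        self.total_bytes_sent = 0

    def send(self, payload):
        buf = bytes(payload)
        self.stream.write(buf)
        self.seq = (self.seq + 1) % 256   # full uint8 range
        self.total_packets_sent += 1
        self.total_bytes_sent += len(buf)

f = Framer(io.BytesIO())
f.send(b"\xfe\x09\x00\x01\x01\x00")
assert (f.seq, f.total_packets_sent, f.total_bytes_sent) == (1, 1, 6)
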
aws/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/typing/context.py | python | CallStack.match | (self, py_func, args) | | Returns first function that matches *py_func* and the argument types in
*args*; or, None if no match. | Returns first function that matches *py_func* and the argument types in
*args*; or, None if no match. | [
"Returns",
"first",
"function",
"that",
"matches",
"*",
"py_func",
"*",
"and",
"the",
"arguments",
"types",
"in",
"*",
"args",
"*",
";",
"or",
"None",
"if",
"no",
"match",
"."
] | def match(self, py_func, args):
"""
Returns first function that matches *py_func* and the argument types in
*args*; or, None if no match.
"""
for frame in self.finditer(py_func):
if frame.args == args:
return frame | [
"def",
"match",
"(",
"self",
",",
"py_func",
",",
"args",
")",
":",
"for",
"frame",
"in",
"self",
".",
"finditer",
"(",
"py_func",
")",
":",
"if",
"frame",
".",
"args",
"==",
"args",
":",
"return",
"frame"
] | https://github.com/aws/lumberyard/blob/f85344403c1c2e77ec8c75deb2c116e97b713217/dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/typing/context.py#L95-L102 |
||
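
match above is a linear scan: finditer presumably yields the frames recorded for py_func, and the first frame whose argument types compare equal is returned (falling off the loop returns None implicitly). The same first-match search written generically (Frame and match here are stand-ins, not numba's types):

class Frame:
    def __init__(self, func, args):
        self.func, self.args = func, tuple(args)

def match(frames, func, args):
    # First recorded frame for this function with exactly these arg types.
    return next((f for f in frames
                 if f.func is func and f.args == tuple(args)), None)

def fn(): pass
frames = [Frame(fn, ("int64",)), Frame(fn, ("float64",))]
assert match(frames, fn, ("float64",)) is frames[1]
assert match(frames, fn, ("complex128",)) is None
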
RegrowthStudios/SoACode-Public | c3ddd69355b534d5e70e2e6d0c489b4e93ab1ffe | utils/git-hooks/cpplint/cpplint.py | python | _FunctionState.Check | (self, error, filename, linenum) | Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check. | Report if too many lines in function body. | [
"Report",
"if",
"too",
"many",
"lines",
"in",
"function",
"body",
"."
] | def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger)) | [
"def",
"Check",
"(",
"self",
",",
"error",
",",
"filename",
",",
"linenum",
")",
":",
"if",
"Match",
"(",
"r'T(EST|est)'",
",",
"self",
".",
"current_function",
")",
":",
"base_trigger",
"=",
"self",
".",
"_TEST_TRIGGER",
"else",
":",
"base_trigger",
"=",
"self",
".",
"_NORMAL_TRIGGER",
"trigger",
"=",
"base_trigger",
"*",
"2",
"**",
"_VerboseLevel",
"(",
")",
"if",
"self",
".",
"lines_in_function",
">",
"trigger",
":",
"error_level",
"=",
"int",
"(",
"math",
".",
"log",
"(",
"self",
".",
"lines_in_function",
"/",
"base_trigger",
",",
"2",
")",
")",
"# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...",
"if",
"error_level",
">",
"5",
":",
"error_level",
"=",
"5",
"error",
"(",
"filename",
",",
"linenum",
",",
"'readability/fn_size'",
",",
"error_level",
",",
"'Small and focused functions are preferred:'",
"' %s has %d non-comment lines'",
"' (error triggered by exceeding %d lines).'",
"%",
"(",
"self",
".",
"current_function",
",",
"self",
".",
"lines_in_function",
",",
"trigger",
")",
")"
] | https://github.com/RegrowthStudios/SoACode-Public/blob/c3ddd69355b534d5e70e2e6d0c489b4e93ab1ffe/utils/git-hooks/cpplint/cpplint.py#L660-L683 |
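
The severity math in Check is logarithmic: the warning fires once lines_in_function exceeds trigger = base_trigger * 2**verbosity, and the reported level is int(log2(lines / base)) clamped to 5, so each doubling in function length raises the level by one. A worked check, using a base of 50 because that is the value the inline "50 => 0, 100 => 1, ..." comment assumes (the real _NORMAL_TRIGGER/_TEST_TRIGGER constants are defined elsewhere in cpplint), and math.log2 for exactness on powers of two:

import math

def fn_size_level(lines, base):
    return min(int(math.log2(lines / base)), 5)

assert [fn_size_level(n, 50) for n in (50, 100, 200, 400, 800, 1600)] \
       == [0, 1, 2, 3, 4, 5]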