column             dtype            range
-----------------  ---------------  ------------
id_within_dataset  int64            46 .. 2.71M
snippet            stringlengths    63 .. 481k
tokens             sequencelengths  20 .. 15.6k
language           stringclasses    2 values
nl                 stringlengths    1 .. 32.4k
is_duplicated      bool             2 classes

id_within_dataset: 2,470,871 | language: python | is_duplicated: true

def step_file_should_not_contain_log_records(context, filename):
    """
    Verifies that the command output contains the specified log records
    (in any order).

    .. code-block: gherkin

        Then the file "xxx.log" should not contain the log records:
            | category | level   | message |
            | bar      | CURRENT | xxx     |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    format = getattr(context, "log_record_format", context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        context.text = output
        step_file_should_not_contain_multiline_text(context, filename)

id_within_dataset: 2,470,872 | language: python | is_duplicated: true

def step_use_log_record_configuration(context):
    """
    Define log record configuration parameters.

    .. code-block: gherkin

        Given I use the log record configuration:
            | property | value |
            | format   |       |
            | datefmt  |       |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["property", "value"])
    for row in context.table.rows:
        property_name = row["property"]
        value = row["value"]
        if property_name == "format":
            context.log_record_format = value
        elif property_name == "datefmt":
            context.log_record_datefmt = value
        else:
            raise KeyError("Unknown property=%s" % property_name)

id_within_dataset: 2,471,123 | language: python | is_duplicated: true

def step_a_new_working_directory(context):
    """Creates a new, empty working directory."""
    command_util.ensure_context_attribute_exists(context, "workdir", None)
    command_util.ensure_workdir_exists(context)
    shutil.rmtree(context.workdir, ignore_errors=True)

id_within_dataset: 2,471,124 | language: python | is_duplicated: true

def step_use_curdir_as_working_directory(context):
    """Uses the current directory as working directory."""
    context.workdir = os.path.abspath(".")
    command_util.ensure_workdir_exists(context)

id_within_dataset: 2,471,125 | language: python | is_duplicated: true

def step_a_file_named_filename_and_encoding_with(context, filename, encoding):
    """Creates a textual file with the content provided as docstring."""
    __encoding_is_valid = True
    assert context.text is not None, "ENSURE: multiline text is provided."
    assert not os.path.isabs(filename)
    assert __encoding_is_valid
    command_util.ensure_workdir_exists(context)
    filename2 = os.path.join(context.workdir, filename)
    pathutil.create_textfile_with_contents(filename2, context.text, encoding)

id_within_dataset: 2,471,126 | language: python | is_duplicated: true

def step_a_file_named_filename_with(context, filename):
    """Creates a textual file with the content provided as docstring."""
    step_a_file_named_filename_and_encoding_with(context, filename, "UTF-8")
    # -- SPECIAL CASE: For usage with behave steps.
    if filename.endswith(".feature"):
        command_util.ensure_context_attribute_exists(context, "features", [])
        context.features.append(filename)

id_within_dataset: 2,471,127 | language: python | is_duplicated: true

def step_an_empty_file_named_filename(context, filename):
    """Creates an empty file."""
    assert not os.path.isabs(filename)
    command_util.ensure_workdir_exists(context)
    filename2 = os.path.join(context.workdir, filename)
    pathutil.create_textfile_with_contents(filename2, "")

id_within_dataset: 2,471,133 | language: python | is_duplicated: true

def step_it_should_pass_with(context):
    '''
    EXAMPLE:
        ...
        when I run "behave ..."
        then it should pass with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, equal_to(0),
                context.command_result.output)

id_within_dataset: 2,471,134 | language: python | is_duplicated: true

def step_it_should_fail_with(context):
    '''
    EXAMPLE:
        ...
        when I run "behave ..."
        then it should fail with:
            """
            TEXT
            """
    '''
    assert context.text is not None, "ENSURE: multiline text is provided."
    step_command_output_should_contain(context)
    assert_that(context.command_result.returncode, is_not(equal_to(0)))

id_within_dataset: 2,471,135 | language: python | is_duplicated: true

def step_command_output_should_contain_text(context, text):
    '''
    EXAMPLE:
        ...
        Then the command output should contain "TEXT"
    '''
    expected_text = text
    if "{__WORKDIR__}" in expected_text or "{__CWD__}" in expected_text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd())
        )
    actual_output = context.command_result.output
    with on_assert_failed_print_details(actual_output, expected_text):
        textutil.assert_normtext_should_contain(actual_output, expected_text)

id_within_dataset: 2,471,136 | language: python | is_duplicated: true

def step_command_output_should_not_contain_text(context, text):
    '''
    EXAMPLE:
        ...
        then the command output should not contain "TEXT"
    '''
    expected_text = text
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd())
        )
    actual_output = context.command_result.output
    with on_assert_failed_print_details(actual_output, expected_text):
        textutil.assert_normtext_should_not_contain(actual_output, expected_text)

id_within_dataset: 2,471,137 | language: python | is_duplicated: true

def step_command_output_should_contain_exactly_text(context, text):
    """
    Verifies that the command output of the last command contains the
    expected text.

    .. code-block:: gherkin

        When I run "echo Hello"
        Then the command output should contain "Hello"
    """
    expected_text = text
    if "{__WORKDIR__}" in text or "{__CWD__}" in text:
        expected_text = textutil.template_substitute(text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd())
        )
    actual_output = context.command_result.output
    textutil.assert_text_should_contain_exactly(actual_output, expected_text)

id_within_dataset: 2,473,742 | language: python | is_duplicated: true

def behave(cmdline, cwd=".", **kwargs):
    """
    Run behave as subprocess command and return process/shell instance
    with results (collected output, returncode).
    """
    assert isinstance(cmdline, six.string_types)
    return run("behave " + cmdline, cwd=cwd, **kwargs)

id_within_dataset: 2,473,964 | language: python | is_duplicated: true

def realpath_with_context(path, context):
    """
    Convert a path into its realpath:

      * For relative path: use :attr:`context.workdir` as root directory
      * For absolute path: Pass-through without any changes.

    :param path: Filepath to convert (as string).
    :param context: Behave context object (with :attr:`context.workdir`)
    :return: Converted path.
    """
    if not os.path.isabs(path):
        # XXX ensure_workdir_exists(context)
        assert context.workdir
        path = os.path.join(context.workdir, os.path.normpath(path))
    return path

id_within_dataset: 2,473,965 | language: python | is_duplicated: true

def posixpath_normpath(pathname):
    """
    Convert path into POSIX path:

      * Normalize path
      * Replace backslash with slash

    :param pathname: Pathname (as string)
    :return: Normalized POSIX path.
    """
    backslash = '\\'
    pathname2 = os.path.normpath(pathname) or "."
    if backslash in pathname2:
        pathname2 = pathname2.replace(backslash, '/')
    return pathname2
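
A minimal usage sketch of posixpath_normpath; the expected outputs follow directly from os.path.normpath plus the backslash replacement:

print(posixpath_normpath("foo/./bar//baz"))   # -> "foo/bar/baz"
print(posixpath_normpath("foo\\bar"))         # -> "foo/bar"
print(posixpath_normpath(""))                 # -> "."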

id_within_dataset: 2,473,967 | language: python | is_duplicated: true

def create_textfile_with_contents(filename, contents, encoding='utf-8'):
    """
    Creates a textual file with the provided contents in the workdir.
    Overwrites an existing file.
    """
    ensure_directory_exists(os.path.dirname(filename))
    if os.path.exists(filename):
        os.remove(filename)
    outstream = codecs.open(filename, "w", encoding)
    outstream.write(contents)
    if contents and not contents.endswith("\n"):
        outstream.write("\n")
    outstream.flush()
    outstream.close()
    assert os.path.exists(filename), "ENSURE file exists: %s" % filename

id_within_dataset: 2,475,627 | language: python | is_duplicated: true

def ensure_context_attribute_exists(context, name, default_value=None):
    """
    Ensure a behave resource exists as attribute in the behave context.
    If this is not the case, the attribute is created by using the
    default_value.
    """
    if not hasattr(context, name):
        setattr(context, name, default_value)

id_within_dataset: 2,475,628 | language: python | is_duplicated: true

def ensure_workdir_exists(context):
    """
    Ensures that the work directory exists.
    In addition, the location of the workdir is stored as attribute in
    the context object.
    """
    ensure_context_attribute_exists(context, "workdir", None)
    if not context.workdir:
        context.workdir = os.path.abspath(WORKDIR)
    pathutil.ensure_directory_exists(context.workdir)

id_within_dataset: 2,476,039 | language: python | is_duplicated: true

def template_substitute(text, **kwargs):
    """
    Replace placeholders in text by using the data mapping.
    Other placeholders that are not represented by the data are left
    untouched.

    :param text: Text to search and replace placeholders.
    :param data: Data mapping/dict for placeholder key and values.
    :return: Potentially modified text with replaced placeholders.
    """
    for name, value in kwargs.items():
        placeholder_pattern = "{%s}" % name
        if placeholder_pattern in text:
            text = text.replace(placeholder_pattern, value)
    return text
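
A minimal usage sketch: only placeholders whose names appear in the keyword arguments are replaced; anything else is left untouched.

text = "cwd={__CWD__} other={unknown}"
print(template_substitute(text, __CWD__="/home/user"))
# -> "cwd=/home/user other={unknown}"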

id_within_dataset: 2,476,040 | language: python | is_duplicated: true

def text_remove_empty_lines(text):
    """
    Whitespace normalization:

      - Strip empty lines
      - Strip trailing whitespace
    """
    lines = [line.rstrip() for line in text.splitlines() if line.strip()]
    return "\n".join(lines)

id_within_dataset: 2,476,041 | language: python | is_duplicated: true

def text_normalize(text):
    """
    Whitespace normalization:

      - Strip empty lines
      - Strip leading whitespace in a line
      - Strip trailing whitespace in a line
      - Normalize line endings
    """
    # if not isinstance(text, str):
    if isinstance(text, bytes):
        # -- MAYBE: command.output => bytes, encoded stream output.
        text = codecs.decode(text)
    lines = [line.strip() for line in text.splitlines() if line.strip()]
    return "\n".join(lines)
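
A minimal usage sketch; text_normalize makes comparisons insensitive to indentation, trailing whitespace, and blank lines:

print(text_normalize("  hello  \n\n\tworld\n"))   # -> "hello\nworld"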

id_within_dataset: 2,477,676 | language: python | is_duplicated: true

def _VarintEncoder():
    """Return an encoder for a basic varint value."""
    local_chr = chr

    def EncodeVarint(write, value):
        bits = value & 0x7f
        value >>= 7
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
        return write(bits)

    return EncodeVarint
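
A usage sketch, assuming the write callable accepts single integer byte values (bytearray.append fits that shape):

encode_varint = _VarintEncoder()
buf = bytearray()
encode_varint(buf.append, 300)
# 300 is split into little-endian 7-bit groups, continuation bit on all
# but the last byte:
print(bytes(buf))   # -> b'\xac\x02'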

id_within_dataset: 2,477,677 | language: python | is_duplicated: true

def _SignedVarintEncoder():
    """Return an encoder for a basic signed varint value."""
    local_chr = chr

    def EncodeSignedVarint(write, value):
        if value < 0:
            value += (1 << 64)
        bits = value & 0x7f
        value >>= 7
        while value:
            write(0x80 | bits)
            bits = value & 0x7f
            value >>= 7
        return write(bits)

    return EncodeSignedVarint

id_within_dataset: 2,479,943 | language: python | is_duplicated: true

def connect_db(config):
    """Connects to the specific database."""
    rv = sqlite3.connect(config["database"]["uri"])
    rv.row_factory = sqlite3.Row
    return rv

id_within_dataset: 2,480,590 | language: python | is_duplicated: true

def strip_leading_comments(text):
    """Strips the leading whitespaces and % from the given text.

    Adapted from textwrap.dedent
    """
    # Look for the longest leading string of spaces and tabs common to
    # all lines.
    margin = None
    text = _whitespace_only_re.sub('', text)
    indents = _leading_whitespace_re.findall(text)
    for indent in indents:
        if margin is None:
            margin = indent
        # Current line more deeply indented than previous winner:
        # no change (previous winner is still on top).
        elif indent.startswith(margin):
            pass
        # Current line consistent with and no deeper than previous winner:
        # it's the new winner.
        elif margin.startswith(indent):
            margin = indent
        # Current line and previous winner have no common whitespace:
        # there is no margin.
        else:
            margin = ""
            break
    # sanity check (testing/debugging only)
    if 0 and margin:
        for line in text.split("\n"):
            assert not line or line.startswith(margin), \
                "line = %r, margin = %r" % (line, margin)
    if margin:
        text = re.sub(r'(?m)^' + margin, '', text)
    return text

id_within_dataset: 2,480,845 | language: python | is_duplicated: true

def inherit_docstring_from(cls):
    """
    This decorator modifies the decorated function's docstring by
    replacing occurrences of '%(super)s' with the docstring of the
    method of the same name from the class `cls`.

    If the decorated method has no docstring, it is simply given the
    docstring of the cls method.

    Extracted from scipy.misc.doccer.
    """
    def _doc(func):
        cls_docstring = getattr(cls, func.__name__).__doc__
        func_docstring = func.__doc__
        if func_docstring is None:
            func.__doc__ = cls_docstring
        else:
            new_docstring = func_docstring % dict(super=cls_docstring)
            func.__doc__ = new_docstring
        return func
    return _doc
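
A minimal usage sketch: '%(super)s' in the subclass docstring expands to the parent method's docstring.

class Base(object):
    def run(self):
        """Run the task."""

class Child(Base):
    @inherit_docstring_from(Base)
    def run(self):
        """%(super)s Retries on failure."""

print(Child.run.__doc__)   # -> "Run the task. Retries on failure."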

id_within_dataset: 2,481,822 | language: python | is_duplicated: true

def make_step(net, step_size=1.5, end="inception_4c/output",
              jitter=32, clip=True, objective=objective_L2):
    '''basic gradient ascent step'''
    src = net.blobs["data"]
    dst = net.blobs[end]
    ox, oy = np.random.randint(-jitter, jitter + 1, 2)
    # Apply jitter shift.
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)
    # Specify the optimisation objective.
    net.forward(end=end)
    objective(dst)
    net.backward(start=end)
    g = src.diff[0]
    # Apply normalised ascent step to the input image.
    src.data[:] += step_size / np.abs(g).mean() * g
    # Unshift the image.
    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)
    if clip:
        bias = net.transformer.mean["data"]
        src.data[:] = np.clip(src.data, -bias, 255 - bias)

id_within_dataset: 2,483,064 | language: python | is_duplicated: true

def init_celery(app, celery):
    """
    Initialise Celery and set up logging

    :param app: Flask app
    :param celery: Celery instance
    """
    celery.conf.update(app.config)
    TaskBase = celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery

id_within_dataset: 2,483,934 | language: python | is_duplicated: true

def vi_pos_matching(line, index=0):
    '''find matching <([{}])>'''
    anchor = None
    target = None
    delta = 1
    count = 0
    try:
        while 1:
            if anchor is None:
                # first find anchor
                try:
                    target, delta = _vi_dct_matching[line[index]]
                    anchor = line[index]
                    count = 1
                except KeyError:
                    index += 1
                    continue
            else:
                # Here the anchor has been found
                # Need to get corresponding target
                if index < 0:
                    return -1
                if line[index] == anchor:
                    count += 1
                elif line[index] == target:
                    count -= 1
                if count == 0:
                    return index
            index += delta
    except IndexError:
        return -1
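
A usage sketch; the _vi_dct_matching table is not shown in the snippet, but the algorithm implies it maps each bracket to a (partner, scan-direction) pair, e.g.:

_vi_dct_matching = {
    '(': (')', 1), ')': ('(', -1),
    '[': (']', 1), ']': ('[', -1),
    '{': ('}', 1), '}': ('{', -1),
}
print(vi_pos_matching("a (b [c] d) e"))   # -> 10, the ')' matching the first '('
print(vi_pos_matching("no brackets"))     # -> -1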

id_within_dataset: 2,485,947 | language: python | is_duplicated: true

def smart_if(parser, token):
    '''
    A smarter {% if %} tag for django templates.

    While retaining current Django functionality, it also handles
    equality, greater than and less than operators. Some common case
    examples::

        {% if articles|length >= 5 %}...{% endif %}
        {% if "ifnotequal tag" != "beautiful" %}...{% endif %}

    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid smart if tag.

    All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
    ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
    '''
    bits = token.split_contents()[1:]
    var = TemplateIfParser(parser, bits).parse()
    nodelist_true = parser.parse(('else', 'endif'))
    token = parser.next_token()
    if token.contents == 'else':
        nodelist_false = parser.parse(('endif',))
        parser.delete_first_token()
    else:
        nodelist_false = None
    return SmartIfNode(var, nodelist_true, nodelist_false)

id_within_dataset: 2,486,090 | language: python | is_duplicated: true

def install_readline(hook):
    '''Set up things for the interpreter to call our function like
    GNU readline.'''
    global readline_hook, readline_ref
    # save the hook so the wrapper can call it
    readline_hook = hook
    # get the address of PyOS_ReadlineFunctionPointer so we can update it
    PyOS_RFP = c_void_p.from_address(
        Console.GetProcAddress(sys.dllhandle,
                               "PyOS_ReadlineFunctionPointer"))
    # save a reference to the generated C-callable so it doesn't go away
    if sys.version < '2.3':
        readline_ref = HOOKFUNC22(hook_wrapper)
    else:
        readline_ref = HOOKFUNC23(hook_wrapper_23)
    # get the address of the function
    func_start = c_void_p.from_address(addressof(readline_ref)).value
    # write the function address into PyOS_ReadlineFunctionPointer
    PyOS_RFP.value = func_start

id_within_dataset: 2,488,025 | language: python | is_duplicated: true

def oauth_url(client_id, permissions=None, server=None, redirect_uri=None):
    """A helper function that returns the OAuth2 URL for inviting the bot
    into servers.

    Parameters
    -----------
    client_id : str
        The client ID for your bot.
    permissions : :class:`Permissions`
        The permissions you're requesting. If not given then you won't
        be requesting any permissions.
    server : :class:`Server`
        The server to pre-select in the authorization screen, if available.
    redirect_uri : str
        An optional valid redirect URI.
    """
    url = 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot'.format(client_id)
    if permissions is not None:
        url = url + '&permissions=' + str(permissions.value)
    if server is not None:
        url = url + "&guild_id=" + server.id
    if redirect_uri is not None:
        from urllib.parse import urlencode
        url = url + "&response_type=code&" + urlencode({'redirect_uri': redirect_uri})
    return url
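
A usage sketch (the client ID and redirect URI are placeholders):

url = oauth_url("1234567890", redirect_uri="https://example.com/cb")
# -> "https://discordapp.com/oauth2/authorize?client_id=1234567890&scope=bot"
#    "&response_type=code&redirect_uri=https%3A%2F%2Fexample.com%2Fcb"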

id_within_dataset: 2,488,416 | language: python | is_duplicated: true

def timelimit(timeout):
    """borrowed from web.py"""
    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    self.setDaemon(True)
                    self.start()

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        self.error = sys.exc_info()

            c = Dispatch()
            c.join(timeout)
            if c.isAlive():
                raise TimeoutError, 'took too long'
            if c.error:
                raise c.error[0], c.error[1]
            return c.result
        return _2
    return _1
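
Usage is as a decorator. Note the snippet is Python 2 only: the `raise x, y` statements and `isAlive()` would need the `raise x(y)` form and `is_alive()` under Python 3.

@timelimit(2.0)
def slow_call():
    # The decorated call raises TimeoutError after 2 seconds; the worker
    # thread itself keeps running as a daemon in the background.
    ...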

id_within_dataset: 2,489,161 | language: python | is_duplicated: true

def add_arguments(parser, default_level=logging.INFO):
    """
    Add arguments to an ArgumentParser or OptionParser for purposes of
    grabbing a logging level.
    """
    adder = (
        getattr(parser, 'add_argument', None)
        or getattr(parser, 'add_option')
    )
    adder(
        '-l', '--log-level', default=default_level, type=log_level,
        help="Set log level (DEBUG, INFO, WARNING, ERROR)")

id_within_dataset: 2,494,124 | language: python | is_duplicated: true

def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    args = [iter(iterable)] * n
    return itertools.izip_longest(fillvalue=fillvalue, *args)
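
A minimal usage sketch; `izip_longest` is the Python 2 spelling, which Python 3 renamed to `itertools.zip_longest`:

list(grouper(3, 'ABCDEFG', 'x'))
# -> [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]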

id_within_dataset: 2,497,405 | language: python | is_duplicated: true

def match_hostname(cert, hostname):
    """
    This is a backport of the match_hostname() function from Python 3.2,
    essential when using SSL.

    Verifies that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules are
    mostly followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_to_pat(value).match(hostname):
                return
            dnsnames.append(value)
    if not san:
        # The subject is only checked when subjectAltName is empty
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_to_pat(value).match(hostname):
                        return
                    dnsnames.append(value)
    if len(dnsnames) > 1:
        raise CertificateError(
            "hostname %r doesn't match either of %s"
            % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError(
            "hostname %r doesn't match %r" % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
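
A usage sketch with a certificate dict shaped like ssl.SSLSocket.getpeercert() output; the helper _dnsname_to_pat is not shown, but presumably compiles the name into a wildcard-aware regex:

cert = {'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.net'))}
match_hostname(cert, 'example.com')   # returns None on success
# match_hostname(cert, 'evil.org')    # raises CertificateError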

id_within_dataset: 2,498,227 | language: python | is_duplicated: true

def _textlist(self, _addtail=False):
    '''Returns a list of text strings contained within an element and
    its sub-elements.

    Helpful for extracting text from prose-oriented XML (such as XHTML
    or DocBook).
    '''
    result = []
    if (not _addtail) and (self.text is not None):
        result.append(self.text)
    for elem in self:
        result.extend(elem.textlist(True))
    if _addtail and self.tail is not None:
        result.append(self.tail)
    return result

id_within_dataset: 2,498,230 | language: python | is_duplicated: true

def _reindent(s, indent, reformat=True):
    """
    Remove the existing indentation from each line of a chunk of text,
    s, and then prefix each line with a new indent string. Also removes
    trailing whitespace from each line, and leading and trailing blank
    lines.
    """
    s = textwrap.dedent(s)
    s = s.split('\n')
    s = [x.rstrip() for x in s]
    while s and (not s[0]):
        s = s[1:]
    while s and (not s[-1]):
        s = s[:-1]
    if reformat:
        s = '\n'.join(s)
        s = textwrap.wrap(s, initial_indent=indent, subsequent_indent=indent)
    else:
        s = [indent + x for x in s]
    return '\n'.join(s) + '\n'
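
A minimal usage sketch contrasting the two modes:

print(_reindent("    alpha\n    beta\n", "  ", reformat=False))
# -> "  alpha\n  beta\n"  (line breaks preserved)
print(_reindent("    alpha\n    beta\n", "  "))
# -> "  alpha beta\n"     (dedented, then rewrapped as a paragraph)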

id_within_dataset: 2,500,307 | language: python | is_duplicated: true

def jsmin(js, **kwargs):
    """
    returns a minified version of the javascript string
    """
    if not is_3:
        if cStringIO and not isinstance(js, unicode):
            # strings can use cStringIO for a 3x performance
            # improvement, but unicode (in python2) cannot
            klass = cStringIO.StringIO
        else:
            klass = StringIO.StringIO
    else:
        klass = io.StringIO
    ins = klass(js)
    outs = klass()
    JavascriptMinify(ins, outs, **kwargs).minify()
    return outs.getvalue()

id_within_dataset: 2,500,399 | language: python | is_duplicated: true

def default_listener(col_attr, default):
    """Establish a default-setting listener."""
    @event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
    def init_scalar(target, value, dict_):
        if default.is_callable:
            # the callable of ColumnDefault always accepts a context argument
            value = default.arg(None)
        elif default.is_scalar:
            value = default.arg
        else:
            raise NotImplementedError(
                "Can't invoke pre-default for a SQL-level column default")
        dict_[col_attr.key] = value
        return value
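
A usage sketch (SQLAlchemy; MyModel and its count column are hypothetical). Once wired up, reading the attribute on a new object triggers the column default instead of returning None:

# given: a mapped class MyModel with count = Column(Integer, default=0)
# default_listener(MyModel.count, MyModel.__table__.c.count.default)
# obj = MyModel(); obj.count  -> 0, and 0 is what gets flushed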

id_within_dataset: 2,500,589 | language: python | is_duplicated: true

def reorient(image, orientation):
    """Return reoriented view of image array.

    Parameters
    ----------
    image : numpy array
        Non-squeezed output of asarray() functions.
        Axes -3 and -2 must be image length and width respectively.
    orientation : int or str
        One of TIFF_ORIENTATIONS keys or values.
    """
    o = TIFF_ORIENTATIONS.get(orientation, orientation)
    if o == 'top_left':
        return image
    elif o == 'top_right':
        return image[..., ::-1, :]
    elif o == 'bottom_left':
        return image[..., ::-1, :, :]
    elif o == 'bottom_right':
        return image[..., ::-1, ::-1, :]
    elif o == 'left_top':
        return numpy.swapaxes(image, -3, -2)
    elif o == 'right_top':
        return numpy.swapaxes(image, -3, -2)[..., ::-1, :]
    elif o == 'left_bottom':
        return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :]
    elif o == 'right_bottom':
        return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]

id_within_dataset: 2,500,590 | language: python | is_duplicated: true

def stripnull(string):
    """Return string truncated at first null character."""
    i = string.find(b'\x00')
    return string if (i < 0) else string[:i]
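
A minimal usage sketch with bytes input:

print(stripnull(b'TIFF\x00\x00padding'))   # -> b'TIFF'
print(stripnull(b'no null here'))          # -> b'no null here'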

id_within_dataset: 2,500,593

def imshow(data, title=None, vmin=0, vmax=None, cmap=None,
           bitspersample=None, photometric='rgb', interpolation='nearest',
           dpi=96, figure=None, subplot=111, maxdim=4096, **kwargs):
    """Plot n-dimensional images using matplotlib.pyplot.

    Return figure, subplot and plot axis.
    Requires pyplot already imported ``from matplotlib import pyplot``.

    Parameters
    ----------
    bitspersample : int or None
        Number of bits per channel in integer RGB images.
    photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'}
        The color space of the image data.
    title : str
        Window and subplot title.
    figure : matplotlib.figure.Figure (optional).
        Matplotlib to use for plotting.
    subplot : int
        A matplotlib.pyplot.subplot axis.
    maxdim : int
        maximum image size in any dimension.
    kwargs : optional
        Arguments for matplotlib.pyplot.imshow.
    """
    #if photometric not in ('miniswhite', 'minisblack', 'rgb', 'palette'):
    #    raise ValueError("Can't handle %s photometrics" % photometric)
    isrgb = photometric in ('rgb', 'palette')
    data = numpy.atleast_2d(data.squeeze())
    data = data[(slice(0, maxdim), ) * len(data.shape)]
    dims = data.ndim
    if dims < 2:
        raise ValueError("not an image")
    elif dims == 2:
        dims = 0
        isrgb = False
    else:
        if isrgb and data.shape[-3] in (3, 4):
            data = numpy.swapaxes(data, -3, -2)
            data = numpy.swapaxes(data, -2, -1)
        elif not isrgb and data.shape[-1] in (3, 4):
            data = numpy.swapaxes(data, -3, -1)
            data = numpy.swapaxes(data, -2, -1)
        isrgb = isrgb and data.shape[-1] in (3, 4)
        dims -= 3 if isrgb else 2
    if photometric == 'palette':
        datamax = data.max()
        if datamax > 255:
            data >>= 8  # possible precision loss
            data = data.astype('B')
    elif data.dtype.kind in 'ui':
        if not isrgb or bitspersample is None:
            bitspersample = int(math.ceil(math.log(data.max(), 2)))
        elif not isinstance(bitspersample, int):
            # bitspersample can be tuple, e.g. (5, 6, 5)
            bitspersample = data.dtype.itemsize * 8
        datamax = 2**bitspersample
        if isrgb:
            if bitspersample < 8:
                data <<= 8 - bitspersample
            elif bitspersample > 8:
                data >>= bitspersample - 8  # precision loss
                data = data.astype('B')
    elif data.dtype.kind == 'f':
        datamax = data.max()
        if isrgb and datamax > 1.0:
            if data.dtype.char == 'd':
                data = data.astype('f')
            data /= datamax
    elif data.dtype.kind == 'b':
        datamax = 1
    if vmax is None:
        vmax = datamax
    if vmin is None:
        if data.dtype.kind != 'f':
            vmin = 0
    pyplot = sys.modules['matplotlib.pyplot']
    if figure is None:
        pyplot.rc('font', family='sans-serif', weight='normal', size=8)
        figure = pyplot.figure(dpi=dpi, figsize=(10.3, 6.3), frameon=True,
                               facecolor='1.0', edgecolor='w')
        try:
            figure.canvas.manager.window.title(title)
        except Exception:
            pass
        pyplot.subplots_adjust(bottom=0.03*(dims+2), top=0.9,
                               left=0.1, right=0.95,
                               hspace=0.05, wspace=0.0)
    subplot = pyplot.subplot(subplot)
    if title:
        pyplot.title(title, size=11)
    if cmap is None:
        if photometric == 'miniswhite':
            cmap = 'gray_r' if vmin == 0 else 'coolwarm_r'
        else:
            cmap = 'gray' if vmin == 0 else 'coolwarm'
    image = pyplot.imshow(data[(0, ) * dims].squeeze(), vmin=vmin, vmax=vmax,
                          cmap=cmap, interpolation=interpolation, **kwargs)
    if not isrgb:
        pyplot.colorbar()  # panchor=(0.55, 0.5), fraction=0.05

    def format_coord(x, y):
        # callback function to format coordinate display in toolbar
        x = int(x + 0.5)
        y = int(y + 0.5)
        try:
            if dims:
                return "%s @ %s [%4i, %4i]" % (cur_ax_dat[1][y, x],
                                               current, x, y)
            else:
                return "%s @ [%4i, %4i]" % (data[y, x], x, y)
        except IndexError:
            return ""

    pyplot.gca().format_coord = format_coord
    if dims:
        current = list((0, ) * dims)
        cur_ax_dat = [0, data[tuple(current)].squeeze()]
        sliders = [pyplot.Slider(
            pyplot.axes([0.125, 0.03*(axis+1), 0.725, 0.025]),
            'Dimension %i' % axis, 0, data.shape[axis]-1, 0,
            facecolor='0.5',
            valfmt='%%.0f [%i]' % data.shape[axis]) for axis in range(dims)]
        for slider in sliders:
            slider.drawon = False

        def set_image(current, sliders=sliders, data=data):
            # change image and redraw canvas
            cur_ax_dat[1] = data[tuple(current)].squeeze()
            image.set_data(cur_ax_dat[1])
            for ctrl, index in zip(sliders, current):
                ctrl.eventson = False
                ctrl.set_val(index)
                ctrl.eventson = True
            figure.canvas.draw()

        def on_changed(index, axis, data=data, current=current):
            # callback function for slider change event
            index = int(round(index))
            cur_ax_dat[0] = axis
            if index == current[axis]:
                return
            if index >= data.shape[axis]:
                index = 0
            elif index < 0:
                index = data.shape[axis] - 1
            current[axis] = index
            set_image(current)

        def on_keypressed(event, data=data, current=current):
            # callback function for key press event
            key = event.key
            axis = cur_ax_dat[0]
            if str(key) in '0123456789':
                on_changed(key, axis)
            elif key == 'right':
                on_changed(current[axis] + 1, axis)
            elif key == 'left':
                on_changed(current[axis] - 1, axis)
            elif key == 'up':
                cur_ax_dat[0] = 0 if axis == len(data.shape)-1 else axis + 1
            elif key == 'down':
                cur_ax_dat[0] = len(data.shape)-1 if axis == 0 else axis - 1
            elif key == 'end':
                on_changed(data.shape[axis] - 1, axis)
            elif key == 'home':
                on_changed(0, axis)

        figure.canvas.mpl_connect('key_press_event', on_keypressed)
        for axis, ctrl in enumerate(sliders):
            ctrl.on_changed(lambda k, a=axis: on_changed(k, a))
    return figure, subplot, image
python
Plot n-dimensional images using matplotlib.pyplot. Return figure, subplot and plot axis. Requires pyplot already imported ``from matplotlib import pyplot``. Parameters ---------- bitspersample : int or None Number of bits per channel in integer RGB images. photometric : {'miniswhite', 'minisblack', 'rgb', or 'palette'} The color space of the image data. title : str Window and subplot title. figure : matplotlib.figure.Figure (optional). Matplotlib figure to use for plotting. subplot : int A matplotlib.pyplot.subplot axis. maxdim : int Maximum image size in any dimension. kwargs : optional Arguments for matplotlib.pyplot.imshow.
true
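A minimal usage sketch for the imshow helper above; the array contents and title are illustrative, and imshow is assumed to be in scope from the module this record was taken from.

import numpy
from matplotlib import pyplot   # the helper requires pyplot imported

# 8-bit RGB test image; photometric='rgb' keeps the color channels.
rgb = (numpy.random.rand(64, 64, 3) * 255).astype('uint8')
figure, subplot, image = imshow(rgb, title='random RGB',
                                photometric='rgb', bitspersample=8)
pyplot.show()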
2,504,270
def wait_until(predicate, success_description, timeout=10): """Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value. """ start = time.time() while True: retval = predicate() if retval: return retval if time.time() - start > timeout: raise AssertionError("Didn't ever %s" % success_description) time.sleep(0.1)
[ "def", "wait_until", "(", "predicate", ",", "success_description", ",", "timeout", "=", "10", ")", ":", "start", "=", "time", ".", "time", "(", ")", "while", "True", ":", "retval", "=", "predicate", "(", ")", "if", "retval", ":", "return", "retval", "if", "time", ".", "time", "(", ")", "-", "start", ">", "timeout", ":", "raise", "AssertionError", "(", "\"Didn't ever %s\"", "%", "success_description", ")", "time", ".", "sleep", "(", "0.1", ")" ]
python
Wait up to 10 seconds (by default) for predicate to be true. E.g.: wait_until(lambda: client.primary == ('a', 1), 'connect to the primary') If the lambda-expression isn't true after 10 seconds, we raise AssertionError("Didn't ever connect to the primary"). Returns the predicate's first true value.
true
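A short usage sketch for wait_until; the predicate here is a toy stand-in for something like a replica-set election check.

import time

start = time.time()
# Returns the predicate's first truthy value, or raises
# AssertionError("Didn't ever pass half a second") on timeout.
wait_until(lambda: time.time() - start > 0.5, 'pass half a second',
           timeout=2)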
2,507,872
def apns_send_bulk_message(registration_ids, alert, **kwargs):
    """
    Sends an APNS notification to one or more registration_ids.
    The registration_ids argument needs to be a list.

    Note that, if set, alert should always be a string. If it is not set,
    it won't be included in the notification. You will need to pass None
    to this for silent notifications.
    """
    with closing(_apns_create_socket_to_push(**kwargs)) as socket:
        for identifier, registration_id in enumerate(registration_ids):
            _apns_send(registration_id, alert, identifier=identifier,
                       socket=socket, **kwargs)
        _apns_check_errors(socket)
[ "def", "apns_send_bulk_message", "(", "registration_ids", ",", "alert", ",", "**", "kwargs", ")", ":", "with", "closing", "(", "_apns_create_socket_to_push", "(", "**", "kwargs", ")", ")", "as", "socket", ":", "for", "identifier", ",", "registration_id", "in", "enumerate", "(", "registration_ids", ")", ":", "_apns_send", "(", "registration_id", ",", "alert", ",", "identifier", "=", "identifier", ",", "socket", "=", "socket", ",", "**", "kwargs", ")", "_apns_check_errors", "(", "socket", ")" ]
python
Sends an APNS notification to one or more registration_ids. The registration_ids argument needs to be a list. Note that, if set, alert should always be a string. If it is not set, it won't be included in the notification. You will need to pass None to this for silent notifications.
true
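A hedged usage sketch for apns_send_bulk_message; the device tokens are placeholders, not real APNS registration ids.

tokens = ['<device-token-1>', '<device-token-2>']   # placeholders
apns_send_bulk_message(tokens, 'Build finished')

# Silent push: the alert must be passed as None rather than omitted.
apns_send_bulk_message(tokens, None)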
2,508,518
def latex_visit_inheritance_diagram(self, node): # type: (nodes.NodeVisitor, inheritance_diagram) -> None """ Output the graph for LaTeX. This will insert a PDF. """ graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash dotcode = graph.generate_dot(name, env=self.builder.env, graph_attrs={'size': '"6.0,6.0"'}) render_dot_latex(self, node, dotcode, {}, 'inheritance') raise nodes.SkipNode
[ "def", "latex_visit_inheritance_diagram", "(", "self", ",", "node", ")", ":", "graph", "=", "node", "[", "'graph'", "]", "graph_hash", "=", "get_graph_hash", "(", "node", ")", "name", "=", "'inheritance%s'", "%", "graph_hash", "dotcode", "=", "graph", ".", "generate_dot", "(", "name", ",", "env", "=", "self", ".", "builder", ".", "env", ",", "graph_attrs", "=", "{", "'size'", ":", "'\"6.0,6.0\"'", "}", ")", "render_dot_latex", "(", "self", ",", "node", ",", "dotcode", ",", "{", "}", ",", "'inheritance'", ")", "raise", "nodes", ".", "SkipNode" ]
python
Output the graph for LaTeX. This will insert a PDF.
true
2,508,519
def texinfo_visit_inheritance_diagram(self, node): # type: (nodes.NodeVisitor, inheritance_diagram) -> None """ Output the graph for Texinfo. This will insert a PNG. """ graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash dotcode = graph.generate_dot(name, env=self.builder.env, graph_attrs={'size': '"6.0,6.0"'}) render_dot_texinfo(self, node, dotcode, {}, 'inheritance') raise nodes.SkipNode
[ "def", "texinfo_visit_inheritance_diagram", "(", "self", ",", "node", ")", ":", "graph", "=", "node", "[", "'graph'", "]", "graph_hash", "=", "get_graph_hash", "(", "node", ")", "name", "=", "'inheritance%s'", "%", "graph_hash", "dotcode", "=", "graph", ".", "generate_dot", "(", "name", ",", "env", "=", "self", ".", "builder", ".", "env", ",", "graph_attrs", "=", "{", "'size'", ":", "'\"6.0,6.0\"'", "}", ")", "render_dot_texinfo", "(", "self", ",", "node", ",", "dotcode", ",", "{", "}", ",", "'inheritance'", ")", "raise", "nodes", ".", "SkipNode" ]
python
Output the graph for Texinfo. This will insert a PNG.
true
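For context, visitors like the two records above are typically registered against the custom node class in a Sphinx extension's setup() hook; this sketch assumes the inheritance_diagram node class is in scope, and the depart handlers are None because both visitors raise SkipNode.

def setup(app):
    app.add_node(inheritance_diagram,
                 latex=(latex_visit_inheritance_diagram, None),
                 texinfo=(texinfo_visit_inheritance_diagram, None))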
2,509,128
def fdrcorrection(pvals, alpha=0.05, method='indep'):
    '''
    NOTE: This function was copied from
    statsmodels.sandbox.stats.multicomp.fdrcorrection0, from
    statsmodels version 0.5.0.

    This is to avoid requiring all of statsmodels to be a dependency for
    metaseq, just for this function.

    pvalue correction for false discovery rate

    This covers Benjamini/Hochberg for independent or positively correlated and
    Benjamini/Yekutieli for general or negatively correlated tests. Both are
    available in the function multipletests, as method=`fdr_bh`, resp. `fdr_by`.

    Parameters
    ----------
    pvals : array_like
        set of p-values of the individual tests.
    alpha : float
        error rate
    method : {'indep', 'negcorr'}

    Returns
    -------
    rejected : array, bool
        True if a hypothesis is rejected, False if not
    pvalue-corrected : array
        pvalues adjusted for multiple hypothesis testing to limit FDR

    Notes
    -----
    If there is prior information on the fraction of true hypotheses, then
    alpha should be set to alpha * m/m_0 where m is the number of tests,
    given by the p-values, and m_0 is an estimate of the number of true
    hypotheses. (see Benjamini, Krieger and Yekutieli)

    The two-step method of Benjamini, Krieger and Yekutieli that estimates
    the number of false hypotheses will be available (soon).

    Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh
    and 'n' for fdr_by.
    '''
    pvals = np.asarray(pvals)

    pvals_sortind = np.argsort(pvals)
    pvals_sorted = pvals[pvals_sortind]
    sortrevind = pvals_sortind.argsort()

    if method in ['i', 'indep', 'p', 'poscorr']:
        ecdffactor = _ecdf(pvals_sorted)
    elif method in ['n', 'negcorr']:
        cm = np.sum(1./np.arange(1, len(pvals_sorted)+1))  # corrected this
        ecdffactor = _ecdf(pvals_sorted) / cm
#    elif method in ['n', 'negcorr']:
#        cm = np.sum(np.arange(len(pvals)))
#        ecdffactor = ecdf(pvals_sorted)/cm
    else:
        raise ValueError('only indep and necorr implemented')
    reject = pvals_sorted <= ecdffactor*alpha
    if reject.any():
        rejectmax = max(np.nonzero(reject)[0])
        reject[:rejectmax] = True

    pvals_corrected_raw = pvals_sorted / ecdffactor
    pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
    pvals_corrected[pvals_corrected > 1] = 1
    return reject[sortrevind], pvals_corrected[sortrevind]
[ "def", "fdrcorrection", "(", "pvals", ",", "alpha", "=", "0.05", ",", "method", "=", "'indep'", ")", ":", "pvals", "=", "np", ".", "asarray", "(", "pvals", ")", "pvals_sortind", "=", "np", ".", "argsort", "(", "pvals", ")", "pvals_sorted", "=", "pvals", "[", "pvals_sortind", "]", "sortrevind", "=", "pvals_sortind", ".", "argsort", "(", ")", "if", "method", "in", "[", "'i'", ",", "'indep'", ",", "'p'", ",", "'poscorr'", "]", ":", "ecdffactor", "=", "_ecdf", "(", "pvals_sorted", ")", "elif", "method", "in", "[", "'n'", ",", "'negcorr'", "]", ":", "cm", "=", "np", ".", "sum", "(", "1.", "/", "np", ".", "arange", "(", "1", ",", "len", "(", "pvals_sorted", ")", "+", "1", ")", ")", "ecdffactor", "=", "_ecdf", "(", "pvals_sorted", ")", "/", "cm", "else", ":", "raise", "ValueError", "(", "'only indep and necorr implemented'", ")", "reject", "=", "pvals_sorted", "<=", "ecdffactor", "*", "alpha", "if", "reject", ".", "any", "(", ")", ":", "rejectmax", "=", "max", "(", "np", ".", "nonzero", "(", "reject", ")", "[", "0", "]", ")", "reject", "[", ":", "rejectmax", "]", "=", "True", "pvals_corrected_raw", "=", "pvals_sorted", "/", "ecdffactor", "pvals_corrected", "=", "np", ".", "minimum", ".", "accumulate", "(", "pvals_corrected_raw", "[", ":", ":", "-", "1", "]", ")", "[", ":", ":", "-", "1", "]", "pvals_corrected", "[", "pvals_corrected", ">", "1", "]", "=", "1", "return", "reject", "[", "sortrevind", "]", ",", "pvals_corrected", "[", "sortrevind", "]" ]
python
NOTE: This function was copied from statsmodels.sandbox.stats.multicomp.fdrcorrection0, from statsmodels version 0.5.0. This is to avoid requiring all of statsmodels to be a dependency for metaseq, just for this function. pvalue correction for false discovery rate This covers Benjamini/Hochberg for independent or positively correlated and Benjamini/Yekutieli for general or negatively correlated tests. Both are available in the function multipletests, as method=`fdr_bh`, resp. `fdr_by`. Parameters ---------- pvals : array_like set of p-values of the individual tests. alpha : float error rate method : {'indep', 'negcorr'} Returns ------- rejected : array, bool True if a hypothesis is rejected, False if not pvalue-corrected : array pvalues adjusted for multiple hypothesis testing to limit FDR Notes ----- If there is prior information on the fraction of true hypotheses, then alpha should be set to alpha * m/m_0 where m is the number of tests, given by the p-values, and m_0 is an estimate of the number of true hypotheses. (see Benjamini, Krieger and Yekutieli) The two-step method of Benjamini, Krieger and Yekutieli that estimates the number of false hypotheses will be available (soon). Method names can be abbreviated to first letter, 'i' or 'p' for fdr_bh and 'n' for fdr_by.
true
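A small worked example for fdrcorrection, assuming numpy is imported as np and the private _ecdf helper from the same module is available; the expected outputs follow directly from the Benjamini/Hochberg rule p_(i) <= (i/m) * alpha.

import numpy as np

pvals = np.array([0.01, 0.02, 0.03, 0.5])
reject, corrected = fdrcorrection(pvals, alpha=0.05, method='indep')
# ecdffactor is [0.25, 0.5, 0.75, 1.0] for m=4 sorted p-values, so:
# reject    -> [ True,  True,  True, False]
# corrected -> [ 0.04,  0.04,  0.04,  0.50]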
2,510,886
def _next_regular(target): """ Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer. """ if target <= 6: return target # Quickly check if it's already a power of 2 if not (target & (target - 1)): return target match = float('inf') # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 while p35 < target: # Ceiling integer division, avoiding conversion to float # (quotient = ceil(target / p35)) quotient = -(-target // p35) # Quickly find next power of 2 >= quotient try: p2 = 2 ** ((quotient - 1).bit_length()) except AttributeError: # Fallback for Python <2.7 p2 = 2 ** _bit_length_26(quotient - 1) N = p2 * p35 if N == target: return N elif N < match: match = N p35 *= 3 if p35 == target: return p35 if p35 < match: match = p35 p5 *= 5 if p5 == target: return p5 if p5 < match: match = p5 return match
[ "def", "_next_regular", "(", "target", ")", ":", "if", "target", "<=", "6", ":", "return", "target", "if", "not", "(", "target", "&", "(", "target", "-", "1", ")", ")", ":", "return", "target", "match", "=", "float", "(", "'inf'", ")", "p5", "=", "1", "while", "p5", "<", "target", ":", "p35", "=", "p5", "while", "p35", "<", "target", ":", "quotient", "=", "-", "(", "-", "target", "//", "p35", ")", "try", ":", "p2", "=", "2", "**", "(", "(", "quotient", "-", "1", ")", ".", "bit_length", "(", ")", ")", "except", "AttributeError", ":", "p2", "=", "2", "**", "_bit_length_26", "(", "quotient", "-", "1", ")", "N", "=", "p2", "*", "p35", "if", "N", "==", "target", ":", "return", "N", "elif", "N", "<", "match", ":", "match", "=", "N", "p35", "*=", "3", "if", "p35", "==", "target", ":", "return", "p35", "if", "p35", "<", "match", ":", "match", "=", "p35", "p5", "*=", "5", "if", "p5", "==", "target", ":", "return", "p5", "if", "p5", "<", "match", ":", "match", "=", "p5", "return", "match" ]
python
Find the next regular number greater than or equal to target. Regular numbers are composites of the prime factors 2, 3, and 5. Also known as 5-smooth numbers or Hamming numbers, these are the optimal size for inputs to FFTPACK. Target must be a positive integer.
true
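A few expected values for _next_regular, each hand-checkable against the 2**a * 3**b * 5**c definition.

for target in (511, 673, 1000):
    print(target, _next_regular(target))
# 511  -> 512   (2**9)
# 673  -> 675   (3**3 * 5**2)
# 1000 -> 1000  (2**3 * 5**3, already regular)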
2,511,240
def make_virtual_offset(block_start_offset, within_block_offset): """Compute a BGZF virtual offset from block start and within block offsets. The BAM indexing scheme records read positions using a 64 bit 'virtual offset', comprising in C terms: block_start_offset << 16 | within_block_offset Here block_start_offset is the file offset of the BGZF block start (unsigned integer using up to 64-16 = 48 bits), and within_block_offset within the (decompressed) block (unsigned 16 bit integer). >>> make_virtual_offset(0, 0) 0 >>> make_virtual_offset(0, 1) 1 >>> make_virtual_offset(0, 2**16 - 1) 65535 >>> make_virtual_offset(0, 2**16) Traceback (most recent call last): ... ValueError: Require 0 <= within_block_offset < 2**16, got 65536 >>> 65536 == make_virtual_offset(1, 0) True >>> 65537 == make_virtual_offset(1, 1) True >>> 131071 == make_virtual_offset(1, 2**16 - 1) True >>> 6553600000 == make_virtual_offset(100000, 0) True >>> 6553600001 == make_virtual_offset(100000, 1) True >>> 6553600010 == make_virtual_offset(100000, 10) True >>> make_virtual_offset(2**48, 0) Traceback (most recent call last): ... ValueError: Require 0 <= block_start_offset < 2**48, got 281474976710656 """ if within_block_offset < 0 or within_block_offset >= 65536: raise ValueError("Require 0 <= within_block_offset < 2**16, got %i" % within_block_offset) if block_start_offset < 0 or block_start_offset >= 281474976710656: raise ValueError("Require 0 <= block_start_offset < 2**48, got %i" % block_start_offset) return (block_start_offset << 16) | within_block_offset
[ "def", "make_virtual_offset", "(", "block_start_offset", ",", "within_block_offset", ")", ":", "if", "within_block_offset", "<", "0", "or", "within_block_offset", ">=", "65536", ":", "raise", "ValueError", "(", "\"Require 0 <= within_block_offset < 2**16, got %i\"", "%", "within_block_offset", ")", "if", "block_start_offset", "<", "0", "or", "block_start_offset", ">=", "281474976710656", ":", "raise", "ValueError", "(", "\"Require 0 <= block_start_offset < 2**48, got %i\"", "%", "block_start_offset", ")", "return", "(", "block_start_offset", "<<", "16", ")", "|", "within_block_offset" ]
python
Compute a BGZF virtual offset from block start and within block offsets. The BAM indexing scheme records read positions using a 64 bit 'virtual offset', comprising in C terms: block_start_offset << 16 | within_block_offset Here block_start_offset is the file offset of the BGZF block start (unsigned integer using up to 64-16 = 48 bits), and within_block_offset within the (decompressed) block (unsigned 16 bit integer). >>> make_virtual_offset(0, 0) 0 >>> make_virtual_offset(0, 1) 1 >>> make_virtual_offset(0, 2**16 - 1) 65535 >>> make_virtual_offset(0, 2**16) Traceback (most recent call last): ... ValueError: Require 0 <= within_block_offset < 2**16, got 65536 >>> 65536 == make_virtual_offset(1, 0) True >>> 65537 == make_virtual_offset(1, 1) True >>> 131071 == make_virtual_offset(1, 2**16 - 1) True >>> 6553600000 == make_virtual_offset(100000, 0) True >>> 6553600001 == make_virtual_offset(100000, 1) True >>> 6553600010 == make_virtual_offset(100000, 10) True >>> make_virtual_offset(2**48, 0) Traceback (most recent call last): ... ValueError: Require 0 <= block_start_offset < 2**48, got 281474976710656
true
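The inverse split is just bit arithmetic; this is a hypothetical companion helper, not part of the snippet above.

def split_virtual_offset(virtual_offset):
    """Undo make_virtual_offset: return (block_start, within_block)."""
    return virtual_offset >> 16, virtual_offset & 0xFFFF

assert split_virtual_offset(6553600010) == (100000, 10)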
2,513,189
def roundrobin(*iterables): "roundrobin('ABC', 'D', 'EF') --> A D E B F C" # Recipe credited to George Sakkis pending = len(iterables) nexts = itertools.cycle(iter(it).next for it in iterables) while pending: try: for next in nexts: yield next() except StopIteration: pending -= 1 nexts = itertools.cycle(itertools.islice(nexts, pending))
[ "def", "roundrobin", "(", "*", "iterables", ")", ":", "pending", "=", "len", "(", "iterables", ")", "nexts", "=", "itertools", ".", "cycle", "(", "iter", "(", "it", ")", ".", "next", "for", "it", "in", "iterables", ")", "while", "pending", ":", "try", ":", "for", "next", "in", "nexts", ":", "yield", "next", "(", ")", "except", "StopIteration", ":", "pending", "-=", "1", "nexts", "=", "itertools", ".", "cycle", "(", "itertools", ".", "islice", "(", "nexts", ",", "pending", ")", ")" ]
python
roundrobin('ABC', 'D', 'EF') --> A D E B F C
true
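Usage matches the docstring one-liner; since the snippet relies on the Python 2 bound method iter(it).next, the sketch below uses a print statement of the same era.

print list(roundrobin('ABC', 'D', 'EF'))
# ['A', 'D', 'E', 'B', 'F', 'C']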
2,513,544
def _chunk_actions(actions, chunk_size, max_chunk_bytes, serializer): """ Split actions into chunks by number or size, serialize them into strings in the process. """ bulk_actions = [] size, action_count = 0, 0 for action, data in actions: action = serializer.dumps(action) cur_size = len(action) + 1 if data is not None: data = serializer.dumps(data) cur_size += len(data) + 1 # full chunk, send it and start a new one if bulk_actions and (size + cur_size > max_chunk_bytes or action_count == chunk_size): yield bulk_actions bulk_actions = [] size, action_count = 0, 0 bulk_actions.append(action) if data is not None: bulk_actions.append(data) size += cur_size action_count += 1 if bulk_actions: yield bulk_actions
[ "def", "_chunk_actions", "(", "actions", ",", "chunk_size", ",", "max_chunk_bytes", ",", "serializer", ")", ":", "bulk_actions", "=", "[", "]", "size", ",", "action_count", "=", "0", ",", "0", "for", "action", ",", "data", "in", "actions", ":", "action", "=", "serializer", ".", "dumps", "(", "action", ")", "cur_size", "=", "len", "(", "action", ")", "+", "1", "if", "data", "is", "not", "None", ":", "data", "=", "serializer", ".", "dumps", "(", "data", ")", "cur_size", "+=", "len", "(", "data", ")", "+", "1", "if", "bulk_actions", "and", "(", "size", "+", "cur_size", ">", "max_chunk_bytes", "or", "action_count", "==", "chunk_size", ")", ":", "yield", "bulk_actions", "bulk_actions", "=", "[", "]", "size", ",", "action_count", "=", "0", ",", "0", "bulk_actions", ".", "append", "(", "action", ")", "if", "data", "is", "not", "None", ":", "bulk_actions", ".", "append", "(", "data", ")", "size", "+=", "cur_size", "action_count", "+=", "1", "if", "bulk_actions", ":", "yield", "bulk_actions" ]
python
Split actions into chunks by number or size, serialize them into strings in the process.
true
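A sketch of _chunk_actions in isolation, using the stdlib json module as a duck-typed serializer (the helper only needs a .dumps attribute); the action dicts are illustrative.

import json

actions = [({'index': {'_id': i}}, {'field': i}) for i in range(3)]
for chunk in _chunk_actions(iter(actions), 2, 10 * 1024 * 1024, json):
    print(len(chunk))   # 4 lines (two action/data pairs), then 2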
2,513,546
def streaming_bulk(client, actions, chunk_size=500, max_chunk_bytes=100 * 1024 * 1024,
        raise_on_error=True, expand_action_callback=expand_action,
        raise_on_exception=True, **kwargs):
    """
    Streaming bulk consumes actions from the iterable passed in and yields
    results per action. For non-streaming usecases use
    :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming
    bulk that returns summary information about the bulk operation once the
    entire input is consumed and sent.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterable containing the actions to be executed
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
        from the execution of the last chunk when some occur. By default we raise.
    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
        call to ``bulk`` and just report the items that failed as failed.
    :arg expand_action_callback: callback executed on each action passed in,
        should return a tuple containing the action line and the data line
        (`None` if data line should be omitted).
    """
    actions = map(expand_action_callback, actions)

    for bulk_actions in _chunk_actions(actions, chunk_size, max_chunk_bytes,
                                       client.transport.serializer):
        for result in _process_bulk_chunk(client, bulk_actions,
                                          raise_on_exception,
                                          raise_on_error, **kwargs):
            yield result
[ "def", "streaming_bulk", "(", "client", ",", "actions", ",", "chunk_size", "=", "500", ",", "max_chunk_bytes", "=", "100", "*", "1014", "*", "1024", ",", "raise_on_error", "=", "True", ",", "expand_action_callback", "=", "expand_action", ",", "raise_on_exception", "=", "True", ",", "**", "kwargs", ")", ":", "actions", "=", "map", "(", "expand_action_callback", ",", "actions", ")", "for", "bulk_actions", "in", "_chunk_actions", "(", "actions", ",", "chunk_size", ",", "max_chunk_bytes", ",", "client", ".", "transport", ".", "serializer", ")", ":", "for", "result", "in", "_process_bulk_chunk", "(", "client", ",", "bulk_actions", ",", "raise_on_exception", ",", "raise_on_error", ",", "**", "kwargs", ")", ":", "yield", "result" ]
python
Streaming bulk consumes actions from the iterable passed in and yields results per action. For non-streaming usecases use :func:`~elasticsearch.helpers.bulk` which is a wrapper around streaming bulk that returns summary information about the bulk operation once the entire input is consumed and sent. :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use :arg actions: iterable containing the actions to be executed :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted).
true
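A consumption sketch for streaming_bulk; client is assumed to be an elasticsearch.Elasticsearch instance, and the index name is a placeholder.

def generate_actions():
    for i in range(1000):
        yield {'_index': 'my-index', '_type': 'doc', '_id': i,
               'value': i}

for ok, item in streaming_bulk(client, generate_actions(),
                               chunk_size=250):
    if not ok:
        print('failed:', item)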
2,513,547
def bulk(client, actions, stats_only=False, **kwargs):
    """
    Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides
    a more human friendly interface - it consumes an iterator of actions and
    sends them to elasticsearch in chunks. It returns a tuple with summary
    information - number of successfully executed actions and either list of
    errors or number of errors if `stats_only` is set to `True`.

    See :func:`~elasticsearch.helpers.streaming_bulk` for more accepted
    parameters

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg stats_only: if `True` only report number of successful/failed
        operations instead of just number of successful and a list of error responses

    Any additional keyword arguments will be passed to
    :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute
    the operation.
    """
    success, failed = 0, 0

    # list of errors to be collected, if not stats_only
    errors = []

    for ok, item in streaming_bulk(client, actions, **kwargs):
        # go through request-response pairs and detect failures
        if not ok:
            if not stats_only:
                errors.append(item)
            failed += 1
        else:
            success += 1

    return success, failed if stats_only else errors
[ "def", "bulk", "(", "client", ",", "actions", ",", "stats_only", "=", "False", ",", "**", "kwargs", ")", ":", "success", ",", "failed", "=", "0", ",", "0", "errors", "=", "[", "]", "for", "ok", ",", "item", "in", "streaming_bulk", "(", "client", ",", "actions", ",", "**", "kwargs", ")", ":", "if", "not", "ok", ":", "if", "not", "stats_only", ":", "errors", ".", "append", "(", "item", ")", "failed", "+=", "1", "else", ":", "success", "+=", "1", "return", "success", ",", "failed", "if", "stats_only", "else", "errors" ]
python
Helper for the :meth:`~elasticsearch.Elasticsearch.bulk` api that provides a more human friendly interface - it consumes an iterator of actions and sends them to elasticsearch in chunks. It returns a tuple with summary information - number of successfully executed actions and either list of errors or number of errors if `stats_only` is set to `True`. See :func:`~elasticsearch.helpers.streaming_bulk` for more accepted parameters :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use :arg actions: iterator containing the actions :arg stats_only: if `True` only report number of successful/failed operations instead of just number of successful and a list of error responses Any additional keyword arguments will be passed to :func:`~elasticsearch.helpers.streaming_bulk` which is used to execute the operation.
true
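The summary-style counterpart, reusing the generator from the previous sketch.

success, errors = bulk(client, generate_actions())
# or, counts only:
success, failed = bulk(client, generate_actions(), stats_only=True)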
2,513,548
def parallel_bulk(client, actions, thread_count=4, chunk_size=500,
        max_chunk_bytes=100 * 1024 * 1024,
        expand_action_callback=expand_action, **kwargs):
    """
    Parallel version of the bulk helper run in multiple threads at once.

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg thread_count: size of the threadpool to use for the bulk requests
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
        from the execution of the last chunk when some occur. By default we raise.
    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
        call to ``bulk`` and just report the items that failed as failed.
    :arg expand_action_callback: callback executed on each action passed in,
        should return a tuple containing the action line and the data line
        (`None` if data line should be omitted).
    """
    # Avoid importing multiprocessing unless parallel_bulk is used
    # to avoid exceptions on restricted environments like App Engine
    from multiprocessing.dummy import Pool
    actions = map(expand_action_callback, actions)

    pool = Pool(thread_count)

    for result in pool.imap(
        lambda chunk: list(_process_bulk_chunk(client, chunk, **kwargs)),
        _chunk_actions(actions, chunk_size, max_chunk_bytes, client.transport.serializer)
    ):
        for item in result:
            yield item

    pool.close()
    pool.join()
[ "def", "parallel_bulk", "(", "client", ",", "actions", ",", "thread_count", "=", "4", ",", "chunk_size", "=", "500", ",", "max_chunk_bytes", "=", "100", "*", "1014", "*", "1024", ",", "expand_action_callback", "=", "expand_action", ",", "**", "kwargs", ")", ":", "from", "multiprocessing", ".", "dummy", "import", "Pool", "actions", "=", "map", "(", "expand_action_callback", ",", "actions", ")", "pool", "=", "Pool", "(", "thread_count", ")", "for", "result", "in", "pool", ".", "imap", "(", "lambda", "chunk", ":", "list", "(", "_process_bulk_chunk", "(", "client", ",", "chunk", ",", "**", "kwargs", ")", ")", ",", "_chunk_actions", "(", "actions", ",", "chunk_size", ",", "max_chunk_bytes", ",", "client", ".", "transport", ".", "serializer", ")", ")", ":", "for", "item", "in", "result", ":", "yield", "item", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")" ]
python
Parallel version of the bulk helper run in multiple threads at once. :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use :arg actions: iterator containing the actions :arg thread_count: size of the threadpool to use for the bulk requests :arg chunk_size: number of docs in one chunk sent to es (default: 500) :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB) :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`) from the execution of the last chunk when some occur. By default we raise. :arg raise_on_exception: if ``False`` then don't propagate exceptions from call to ``bulk`` and just report the items that failed as failed. :arg expand_action_callback: callback executed on each action passed in, should return a tuple containing the action line and the data line (`None` if data line should be omitted).
true
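parallel_bulk is a generator, so it does no work until consumed; a common idiom when per-item results are not needed is to drain it into a zero-length deque.

from collections import deque

deque(parallel_bulk(client, generate_actions(), thread_count=4),
      maxlen=0)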
2,515,176
def ignore_patterns(*patterns): """Function that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude files""" import fnmatch def _ignore_patterns(path, names): ignored_names = [] for pattern in patterns: ignored_names.extend(fnmatch.filter(names, pattern)) return set(ignored_names) return _ignore_patterns
[ "def", "ignore_patterns", "(", "*", "patterns", ")", ":", "import", "fnmatch", "def", "_ignore_patterns", "(", "path", ",", "names", ")", ":", "ignored_names", "=", "[", "]", "for", "pattern", "in", "patterns", ":", "ignored_names", ".", "extend", "(", "fnmatch", ".", "filter", "(", "names", ",", "pattern", ")", ")", "return", "set", "(", "ignored_names", ")", "return", "_ignore_patterns" ]
python
Function that can be used as copytree() ignore parameter. Patterns is a sequence of glob-style patterns that are used to exclude files
true
2,515,177
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    XXX Consider this example code rather than the ultimate tool.
    """
    from shutil import copy2, Error, copystat
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy2(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            # append a single (src, dst, reason) tuple, matching the
            # entries collected above (extend would splice in three items)
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
[ "def", "copytree", "(", "src", ",", "dst", ",", "symlinks", "=", "False", ",", "ignore", "=", "None", ")", ":", "from", "shutil", "import", "copy2", ",", "Error", ",", "copystat", "names", "=", "os", ".", "listdir", "(", "src", ")", "if", "ignore", "is", "not", "None", ":", "ignored_names", "=", "ignore", "(", "src", ",", "names", ")", "else", ":", "ignored_names", "=", "set", "(", ")", "os", ".", "makedirs", "(", "dst", ")", "errors", "=", "[", "]", "for", "name", "in", "names", ":", "if", "name", "in", "ignored_names", ":", "continue", "srcname", "=", "os", ".", "path", ".", "join", "(", "src", ",", "name", ")", "dstname", "=", "os", ".", "path", ".", "join", "(", "dst", ",", "name", ")", "try", ":", "if", "symlinks", "and", "os", ".", "path", ".", "islink", "(", "srcname", ")", ":", "linkto", "=", "os", ".", "readlink", "(", "srcname", ")", "os", ".", "symlink", "(", "linkto", ",", "dstname", ")", "elif", "os", ".", "path", ".", "isdir", "(", "srcname", ")", ":", "copytree", "(", "srcname", ",", "dstname", ",", "symlinks", ",", "ignore", ")", "else", ":", "copy2", "(", "srcname", ",", "dstname", ")", "except", "Error", "as", "err", ":", "errors", ".", "extend", "(", "err", ".", "args", "[", "0", "]", ")", "except", "EnvironmentError", "as", "why", ":", "errors", ".", "append", "(", "(", "srcname", ",", "dstname", ",", "str", "(", "why", ")", ")", ")", "try", ":", "copystat", "(", "src", ",", "dst", ")", "except", "OSError", "as", "why", ":", "if", "WindowsError", "is", "not", "None", "and", "isinstance", "(", "why", ",", "WindowsError", ")", ":", "pass", "else", ":", "errors", ".", "extend", "(", "(", "src", ",", "dst", ",", "str", "(", "why", ")", ")", ")", "if", "errors", ":", "raise", "Error", "(", "errors", ")" ]
python
Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. XXX Consider this example code rather than the ultimate tool.
true
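A combined usage sketch for copytree with the ignore_patterns factory from the previous record; the paths are placeholders, and the destination must not already exist.

copytree('project/src', 'project/backup',
         ignore=ignore_patterns('*.pyc', 'tmp*', '.git'))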
2,515,627
def _get_regex(data, position, dummy0, dummy1): """Decode a BSON regex to bson.regex.Regex or a python pattern object.""" pattern, position = _get_c_string(data, position) bson_flags, position = _get_c_string(data, position) bson_re = Regex(pattern, bson_flags) return bson_re, position
[ "def", "_get_regex", "(", "data", ",", "position", ",", "dummy0", ",", "dummy1", ")", ":", "pattern", ",", "position", "=", "_get_c_string", "(", "data", ",", "position", ")", "bson_flags", ",", "position", "=", "_get_c_string", "(", "data", ",", "position", ")", "bson_re", "=", "Regex", "(", "pattern", ",", "bson_flags", ")", "return", "bson_re", ",", "position" ]
python
Decode a BSON regex to bson.regex.Regex or a python pattern object.
true
2,515,629
def _bson_to_dict(data, opts): """Decode a BSON string to document_class.""" try: obj_size = _UNPACK_INT(data[:4])[0] except struct.error as exc: raise InvalidBSON(str(exc)) if obj_size != len(data): raise InvalidBSON("invalid object size") if data[obj_size - 1:obj_size] != b"\x00": raise InvalidBSON("bad eoo") try: return _elements_to_dict(data, 4, obj_size - 1, opts) except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() reraise(InvalidBSON, exc_value, exc_tb)
[ "def", "_bson_to_dict", "(", "data", ",", "opts", ")", ":", "try", ":", "obj_size", "=", "_UNPACK_INT", "(", "data", "[", ":", "4", "]", ")", "[", "0", "]", "except", "struct", ".", "error", "as", "exc", ":", "raise", "InvalidBSON", "(", "str", "(", "exc", ")", ")", "if", "obj_size", "!=", "len", "(", "data", ")", ":", "raise", "InvalidBSON", "(", "\"invalid object size\"", ")", "if", "data", "[", "obj_size", "-", "1", ":", "obj_size", "]", "!=", "b\"\\x00\"", ":", "raise", "InvalidBSON", "(", "\"bad eoo\"", ")", "try", ":", "return", "_elements_to_dict", "(", "data", ",", "4", ",", "obj_size", "-", "1", ",", "opts", ")", "except", "InvalidBSON", ":", "raise", "except", "Exception", ":", "_", ",", "exc_value", ",", "exc_tb", "=", "sys", ".", "exc_info", "(", ")", "reraise", "(", "InvalidBSON", ",", "exc_value", ",", "exc_tb", ")" ]
python
Decode a BSON string to document_class.
true
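A sketch of the BSON framing this decoder validates: a little-endian int32 total size, the element bytes, then a 0x00 terminator. The smallest valid document is the empty one, five bytes long.

import struct

empty = struct.pack('<i', 5) + b'\x00'     # b'\x05\x00\x00\x00\x00'
assert struct.unpack('<i', empty[:4])[0] == len(empty)
assert empty[-1:] == b'\x00'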
2,515,632
def _encode_code(name, value, dummy, opts): """Encode bson.code.Code.""" cstring = _make_c_string(value) cstrlen = len(cstring) if not value.scope: return b"\x0D" + name + _PACK_INT(cstrlen) + cstring scope = _dict_to_bson(value.scope, False, opts, False) full_length = _PACK_INT(8 + cstrlen + len(scope)) return b"\x0F" + name + full_length + _PACK_INT(cstrlen) + cstring + scope
[ "def", "_encode_code", "(", "name", ",", "value", ",", "dummy", ",", "opts", ")", ":", "cstring", "=", "_make_c_string", "(", "value", ")", "cstrlen", "=", "len", "(", "cstring", ")", "if", "not", "value", ".", "scope", ":", "return", "b\"\\x0D\"", "+", "name", "+", "_PACK_INT", "(", "cstrlen", ")", "+", "cstring", "scope", "=", "_dict_to_bson", "(", "value", ".", "scope", ",", "False", ",", "opts", ",", "False", ")", "full_length", "=", "_PACK_INT", "(", "8", "+", "cstrlen", "+", "len", "(", "scope", ")", ")", "return", "b\"\\x0F\"", "+", "name", "+", "full_length", "+", "_PACK_INT", "(", "cstrlen", ")", "+", "cstring", "+", "scope" ]
python
Encode bson.code.Code.
true
2,517,683
def recursive_unicode(obj): """Walks a simple data structure, converting byte strings to unicode. Supports lists, tuples, and dictionaries. """ if isinstance(obj, dict): return dict((recursive_unicode(k), recursive_unicode(v)) for (k,v) in obj.iteritems()) elif isinstance(obj, list): return list(recursive_unicode(i) for i in obj) elif isinstance(obj, tuple): return tuple(recursive_unicode(i) for i in obj) elif isinstance(obj, bytes): return to_unicode(obj) else: return obj
[ "def", "recursive_unicode", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "dict", "(", "(", "recursive_unicode", "(", "k", ")", ",", "recursive_unicode", "(", "v", ")", ")", "for", "(", "k", ",", "v", ")", "in", "obj", ".", "iteritems", "(", ")", ")", "elif", "isinstance", "(", "obj", ",", "list", ")", ":", "return", "list", "(", "recursive_unicode", "(", "i", ")", "for", "i", "in", "obj", ")", "elif", "isinstance", "(", "obj", ",", "tuple", ")", ":", "return", "tuple", "(", "recursive_unicode", "(", "i", ")", "for", "i", "in", "obj", ")", "elif", "isinstance", "(", "obj", ",", "bytes", ")", ":", "return", "to_unicode", "(", "obj", ")", "else", ":", "return", "obj" ]
python
Walks a simple data structure, converting byte strings to unicode. Supports lists, tuples, and dictionaries.
true
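A quick demonstration of recursive_unicode on a nested structure (Python 2, matching the iteritems/unicode idioms above); to_unicode is assumed from the surrounding module, in the style of tornado's escape helpers.

data = {b'key': [b'a', (b'b',)]}
print recursive_unicode(data)
# {u'key': [u'a', (u'b',)]}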
2,519,651
def parent_dir(path):
    '''Return the '_build' directory located two levels above *path*.'''
    return os.path.abspath(os.path.join(path, os.pardir, os.pardir, '_build'))
[ "def", "parent_dir", "(", "path", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "path", ",", "os", ".", "pardir", ",", "os", ".", "pardir", ",", "'_build'", ")", ")" ]
python
Return the '_build' directory located two levels above *path*.
true
2,522,682
def nice_classname(obj): """Returns a nice name for class object or class instance. >>> nice_classname(Exception()) # doctest: +ELLIPSIS '...Exception' >>> nice_classname(Exception) # doctest: +ELLIPSIS '...Exception' """ if inspect.isclass(obj): cls_name = obj.__name__ else: cls_name = obj.__class__.__name__ mod = inspect.getmodule(obj) if mod: name = mod.__name__ # jython if name.startswith('org.python.core.'): name = name[len('org.python.core.'):] return "%s.%s" % (name, cls_name) else: return cls_name
[ "def", "nice_classname", "(", "obj", ")", ":", "if", "inspect", ".", "isclass", "(", "obj", ")", ":", "cls_name", "=", "obj", ".", "__name__", "else", ":", "cls_name", "=", "obj", ".", "__class__", ".", "__name__", "mod", "=", "inspect", ".", "getmodule", "(", "obj", ")", "if", "mod", ":", "name", "=", "mod", ".", "__name__", "if", "name", ".", "startswith", "(", "'org.python.core.'", ")", ":", "name", "=", "name", "[", "len", "(", "'org.python.core.'", ")", ":", "]", "return", "\"%s.%s\"", "%", "(", "name", ",", "cls_name", ")", "else", ":", "return", "cls_name" ]
python
Returns a nice name for class object or class instance. >>> nice_classname(Exception()) # doctest: +ELLIPSIS '...Exception' >>> nice_classname(Exception) # doctest: +ELLIPSIS '...Exception'
true
2,522,683
def exc_message(exc_info): """Return the exception's message.""" exc = exc_info[1] if exc is None: # str exception result = exc_info[0] else: try: result = str(exc) except UnicodeEncodeError: try: result = unicode(exc) # flake8: noqa except UnicodeError: # Fallback to args as neither str nor # unicode(Exception(u'\xe6')) work in Python < 2.6 result = exc.args[0] return result
[ "def", "exc_message", "(", "exc_info", ")", ":", "exc", "=", "exc_info", "[", "1", "]", "if", "exc", "is", "None", ":", "result", "=", "exc_info", "[", "0", "]", "else", ":", "try", ":", "result", "=", "str", "(", "exc", ")", "except", "UnicodeEncodeError", ":", "try", ":", "result", "=", "unicode", "(", "exc", ")", "except", "UnicodeError", ":", "result", "=", "exc", ".", "args", "[", "0", "]", "return", "result" ]
python
Return the exception's message.
true
2,523,099
def relabel_nodes(G, mapping, copy=True):
    """Relabel the nodes of the graph G.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    mapping : dictionary
       A dictionary with the old labels as keys and new labels as values.
       A partial mapping is allowed.

    copy : bool (optional, default=True)
       If True return a copy, or if False relabel the nodes in place.

    Examples
    --------
    >>> G=nx.path_graph(3)  # nodes 0-1-2
    >>> mapping={0:'a',1:'b',2:'c'}
    >>> H=nx.relabel_nodes(G,mapping)
    >>> print(sorted(H.nodes()))
    ['a', 'b', 'c']

    >>> G=nx.path_graph(26) # nodes 0..25
    >>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz"))
    >>> H=nx.relabel_nodes(G,mapping) # nodes a..z
    >>> mapping=dict(zip(G.nodes(),range(1,27)))
    >>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26

    Partial in-place mapping:

    >>> G=nx.path_graph(3)  # nodes 0-1-2
    >>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b'
    >>> G=nx.relabel_nodes(G,mapping, copy=False)
    >>> print(G.nodes())
    [2, 'b', 'a']

    Mapping as function:

    >>> G=nx.path_graph(3)
    >>> def mapping(x):
    ...    return x**2
    >>> H=nx.relabel_nodes(G,mapping)
    >>> print(H.nodes())
    [0, 1, 4]

    Notes
    -----
    Only the nodes specified in the mapping will be relabeled.

    The keyword setting copy=False modifies the graph in place.
    This is not always possible if the mapping is circular.
    In that case use copy=True.

    See Also
    --------
    convert_node_labels_to_integers
    """
    # you can pass a function f(old_label)->new_label
    # but we'll just make a dictionary here regardless
    if not hasattr(mapping, "__getitem__"):
        m = dict((n, mapping(n)) for n in G)
    else:
        m = mapping
    if copy:
        return _relabel_copy(G, m)
    else:
        return _relabel_inplace(G, m)
[ "def", "relabel_nodes", "(", "G", ",", "mapping", ",", "copy", "=", "True", ")", ":", "if", "not", "hasattr", "(", "mapping", ",", "\"__getitem__\"", ")", ":", "m", "=", "dict", "(", "(", "n", ",", "mapping", "(", "n", ")", ")", "for", "n", "in", "G", ")", "else", ":", "m", "=", "mapping", "if", "copy", ":", "return", "_relabel_copy", "(", "G", ",", "m", ")", "else", ":", "return", "_relabel_inplace", "(", "G", ",", "m", ")" ]
python
Relabel the nodes of the graph G. Parameters ---------- G : graph A NetworkX graph mapping : dictionary A dictionary with the old labels as keys and new labels as values. A partial mapping is allowed. copy : bool (optional, default=True) If True return a copy, or if False relabel the nodes in place. Examples -------- >>> G=nx.path_graph(3)  # nodes 0-1-2 >>> mapping={0:'a',1:'b',2:'c'} >>> H=nx.relabel_nodes(G,mapping) >>> print(sorted(H.nodes())) ['a', 'b', 'c'] >>> G=nx.path_graph(26) # nodes 0..25 >>> mapping=dict(zip(G.nodes(),"abcdefghijklmnopqrstuvwxyz")) >>> H=nx.relabel_nodes(G,mapping) # nodes a..z >>> mapping=dict(zip(G.nodes(),range(1,27))) >>> G1=nx.relabel_nodes(G,mapping) # nodes 1..26 Partial in-place mapping: >>> G=nx.path_graph(3)  # nodes 0-1-2 >>> mapping={0:'a',1:'b'} # 0->'a' and 1->'b' >>> G=nx.relabel_nodes(G,mapping, copy=False) >>> print(G.nodes()) [2, 'b', 'a'] Mapping as function: >>> G=nx.path_graph(3) >>> def mapping(x): ...    return x**2 >>> H=nx.relabel_nodes(G,mapping) >>> print(H.nodes()) [0, 1, 4] Notes ----- Only the nodes specified in the mapping will be relabeled. The keyword setting copy=False modifies the graph in place. This is not always possible if the mapping is circular. In that case use copy=True. See Also -------- convert_node_labels_to_integers
true
2,524,717
def iterSourceCode(paths): """ Iterate over all Python source files in C{paths}. @param paths: A list of paths. Directories will be recursed into and any .py files found will be yielded. Any non-directories will be yielded as-is. """ for path in paths: if os.path.isdir(path): for dirpath, dirnames, filenames in os.walk(path): for filename in filenames: if filename.endswith('.py'): yield os.path.join(dirpath, filename) else: yield path
[ "def", "iterSourceCode", "(", "paths", ")", ":", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "path", ")", ":", "for", "filename", "in", "filenames", ":", "if", "filename", ".", "endswith", "(", "'.py'", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", "else", ":", "yield", "path" ]
python
Iterate over all Python source files in C{paths}. @param paths: A list of paths. Directories will be recursed into and any .py files found will be yielded. Any non-directories will be yielded as-is.
true
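A one-liner showing iterSourceCode over a mixed list of a directory and a single file (the paths are placeholders).

for source in iterSourceCode(['mypackage/', 'setup.py']):
    print(source)   # every .py under mypackage/, plus setup.py as-is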
2,527,107
def increase_indent(func):
    """Decorator for making debug output of the decorated function
    indented one level deeper."""
    def wrapper(*args, **kwargs):
        global _debug_indent
        _debug_indent += 1
        result = func(*args, **kwargs)
        _debug_indent -= 1
        return result
    return wrapper
[ "def", "increase_indent", "(", "func", ")", ":", "def", "wrapper", "(", "*", "args", ",", "**", "kwargs", ")", ":", "global", "_debug_indent", "_debug_indent", "+=", "1", "result", "=", "func", "(", "*", "args", ",", "**", "kwargs", ")", "_debug_indent", "-=", "1", "return", "result", "return", "wrapper" ]
python
Decorator for making debug output of the decorated function indented one level deeper.
true
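A sketch of the decorator's effect; it only behaves like this when run in the module that defines _debug_indent, since wrapper mutates that module-level global.

_debug_indent = 0

@increase_indent
def report():
    print(_debug_indent)    # 1 while the call is on the stack

report()
print(_debug_indent)        # back to 0 afterwards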
2,527,604
def package(options):
    """
    Creates a tarball to use for building the system elsewhere
    """
    import pkg_resources
    import tarfile
    import geonode

    version = geonode.get_version()
    # Use GeoNode's version for the package name.
    pkgname = 'GeoNode-%s-all' % version

    # Create the output directory.
    out_pkg = path(pkgname)
    out_pkg_tar = path("%s.tar.gz" % pkgname)

    # Create a distribution in zip format for the geonode python package.
    dist_dir = path('dist')
    dist_dir.rmtree()
    sh('python setup.py sdist --formats=zip')

    with pushd('package'):

        # Delete old tar files in that directory
        for f in glob.glob('GeoNode*.tar.gz'):
            old_package = path(f)
            if old_package != out_pkg_tar:
                old_package.remove()

        if out_pkg_tar.exists():
            info('There is already a package for version %s' % version)
            return

        # Clean anything that is in the output package tree.
        out_pkg.rmtree()
        out_pkg.makedirs()

        support_folder = path('support')
        install_file = path('install.sh')

        # And copy the default files from the package folder.
        justcopy(support_folder, out_pkg / 'support')
        justcopy(install_file, out_pkg)

        geonode_dist = path('..') / 'dist' / 'GeoNode-%s.zip' % version
        justcopy(geonode_dist, out_pkg)

        rogue_dist = path('../..') / 'dist' / 'geoshape-0.1.zip'
        justcopy(rogue_dist, out_pkg)

        # Create a tar file with all files in the output package folder.
        tar = tarfile.open(out_pkg_tar, "w:gz")
        for file in out_pkg.walkfiles():
            tar.add(file)

        # Add the README with the license and important links to documentation.
        tar.add('README', arcname=('%s/README.md' % out_pkg))
        tar.close()

        # Remove all the files in the temporary output package directory.
        out_pkg.rmtree()

        # Report the info about the new package.
        info("%s created" % out_pkg_tar.abspath())
[ "def", "package", "(", "options", ")", ":", "import", "pkg_resources", "import", "tarfile", "import", "geonode", "version", "=", "geonode", ".", "get_version", "(", ")", "pkgname", "=", "'GeoNode-%s-all'", "%", "version", "out_pkg", "=", "path", "(", "pkgname", ")", "out_pkg_tar", "=", "path", "(", "\"%s.tar.gz\"", "%", "pkgname", ")", "dist_dir", "=", "path", "(", "'dist'", ")", "dist_dir", ".", "rmtree", "(", ")", "sh", "(", "'python setup.py sdist --formats=zip'", ")", "with", "pushd", "(", "'package'", ")", ":", "for", "f", "in", "glob", ".", "glob", "(", "'GeoNode*.tar.gz'", ")", ":", "old_package", "=", "path", "(", "f", ")", "if", "old_package", "!=", "out_pkg_tar", ":", "old_package", ".", "remove", "(", ")", "if", "out_pkg_tar", ".", "exists", "(", ")", ":", "info", "(", "'There is already a package for version %s'", "%", "version", ")", "return", "out_pkg", ".", "rmtree", "(", ")", "out_pkg", ".", "makedirs", "(", ")", "support_folder", "=", "path", "(", "'support'", ")", "install_file", "=", "path", "(", "'install.sh'", ")", "justcopy", "(", "support_folder", ",", "out_pkg", "/", "'support'", ")", "justcopy", "(", "install_file", ",", "out_pkg", ")", "geonode_dist", "=", "path", "(", "'..'", ")", "/", "'dist'", "/", "'GeoNode-%s.zip'", "%", "version", "justcopy", "(", "geonode_dist", ",", "out_pkg", ")", "rogue_dist", "=", "path", "(", "'../..'", ")", "/", "'dist'", "/", "'geoshape-0.1.zip'", "justcopy", "(", "rogue_dist", ",", "out_pkg", ")", "tar", "=", "tarfile", ".", "open", "(", "out_pkg_tar", ",", "\"w:gz\"", ")", "for", "file", "in", "out_pkg", ".", "walkfiles", "(", ")", ":", "tar", ".", "add", "(", "file", ")", "tar", ".", "add", "(", "'README'", ",", "arcname", "=", "(", "'%s/README.md'", "%", "out_pkg", ")", ")", "tar", ".", "close", "(", ")", "out_pkg", ".", "rmtree", "(", ")", "info", "(", "\"%s created\"", "%", "out_pkg_tar", ".", "abspath", "(", ")", ")" ]
python
Creates a tarball to use for building the system elsewhere
true
2,527,606
def test_integration(options):
    """
    Run GeoNode's Integration test suite against the external apps
    """
    _reset()
    # Start GeoServer
    call_task('start_geoserver')
    info("GeoNode is now available, running the tests now.")

    name = options.get('name', 'geonode.tests.integration')

    success = False
    try:
        if name == 'geonode.tests.csw':
            call_task('start')
            sh('sleep 30')
            call_task('setup_data')
        sh(('python manage.py test %s'
            ' --noinput --liveserver=localhost:8000' % name))
    except BuildFailure, e:
        info('Tests failed! %s' % str(e))
    else:
        success = True
    finally:
        # don't use call task here - it won't run since it already has been called
        stop()
        _reset()

    if not success:
        sys.exit(1)
[ "def", "test_integration", "(", "options", ")", ":", "_reset", "(", ")", "call_task", "(", "'start_geoserver'", ")", "info", "(", "\"GeoNode is now available, running the tests now.\"", ")", "name", "=", "options", ".", "get", "(", "'name'", ",", "'geonode.tests.integration'", ")", "success", "=", "False", "try", ":", "if", "name", "==", "'geonode.tests.csw'", ":", "call_task", "(", "'start'", ")", "sh", "(", "'sleep 30'", ")", "call_task", "(", "'setup_data'", ")", "sh", "(", "(", "'python manage.py test %s'", "' --noinput --liveserver=localhost:8000'", "%", "name", ")", ")", "except", "BuildFailure", ",", "e", ":", "info", "(", "'Tests failed! %s'", "%", "str", "(", "e", ")", ")", "else", ":", "success", "=", "True", "finally", ":", "stop", "(", ")", "_reset", "(", ")", "if", "not", "success", ":", "sys", ".", "exit", "(", "1", ")" ]
python
Run GeoNode's Integration test suite against the external apps
true
2,527,607
def setup_data(): """ Import sample data (from gisdata package) into GeoNode """ import gisdata ctype = options.get('type', None) data_dir = gisdata.GOOD_DATA if ctype in ['vector', 'raster', 'time']: data_dir = os.path.join(gisdata.GOOD_DATA, ctype) sh("python manage.py importlayers %s -v2" % data_dir)
[ "def", "setup_data", "(", ")", ":", "import", "gisdata", "ctype", "=", "options", ".", "get", "(", "'type'", ",", "None", ")", "data_dir", "=", "gisdata", ".", "GOOD_DATA", "if", "ctype", "in", "[", "'vector'", ",", "'raster'", ",", "'time'", "]", ":", "data_dir", "=", "os", ".", "path", ".", "join", "(", "gisdata", ".", "GOOD_DATA", ",", "ctype", ")", "sh", "(", "\"python manage.py importlayers %s -v2\"", "%", "data_dir", ")" ]
python
Import sample data (from gisdata package) into GeoNode
true
2,528,490
def get_manager(cls):
    """
    Returns the appropriate Manager class for a given Model class. It
    does this by looking in the boto config for a section like this::

        [DB]
        db_type = SimpleDB
        db_user = <aws access key id>
        db_passwd = <aws secret access key>
        db_name = my_domain
        [DB_TestBasic]
        db_type = SimpleDB
        db_user = <another aws access key id>
        db_passwd = <another aws secret access key>
        db_name = basic_domain
        db_port = 1111

    The values in the DB section are "generic values" that will be used
    if nothing more specific is found. You can also create a section for
    a specific Model class that gives the db info for that class.
    In the example above, TestBasic is a Model subclass.
    """
    db_user = boto.config.get('DB', 'db_user', None)
    db_passwd = boto.config.get('DB', 'db_passwd', None)
    db_type = boto.config.get('DB', 'db_type', 'SimpleDB')
    db_name = boto.config.get('DB', 'db_name', None)
    db_table = boto.config.get('DB', 'db_table', None)
    db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com")
    db_port = boto.config.getint('DB', 'db_port', 443)
    enable_ssl = boto.config.getbool('DB', 'enable_ssl', True)
    sql_dir = boto.config.get('DB', 'sql_dir', None)
    debug = boto.config.getint('DB', 'debug', 0)
    # first see if there is a fully qualified section name in the Boto config file
    module_name = cls.__module__.replace('.', '_')
    db_section = 'DB_' + module_name + '_' + cls.__name__
    if not boto.config.has_section(db_section):
        db_section = 'DB_' + cls.__name__
    if boto.config.has_section(db_section):
        db_user = boto.config.get(db_section, 'db_user', db_user)
        db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd)
        db_type = boto.config.get(db_section, 'db_type', db_type)
        db_name = boto.config.get(db_section, 'db_name', db_name)
        db_table = boto.config.get(db_section, 'db_table', db_table)
        db_host = boto.config.get(db_section, 'db_host', db_host)
        db_port = boto.config.getint(db_section, 'db_port', db_port)
        enable_ssl = boto.config.getint(db_section, 'enable_ssl', enable_ssl)
        debug = boto.config.getint(db_section, 'debug', debug)
    elif hasattr(cls, "_db_name") and cls._db_name is not None:
        # More specific than the generic DB config is any _db_name class property
        db_name = cls._db_name
    elif hasattr(cls.__bases__[0], "_manager"):
        return cls.__bases__[0]._manager
    if db_type == 'SimpleDB':
        from sdbmanager import SDBManager
        return SDBManager(cls, db_name, db_user, db_passwd,
                          db_host, db_port, db_table, sql_dir, enable_ssl)
    elif db_type == 'PostgreSQL':
        from pgmanager import PGManager
        if db_table:
            return PGManager(cls, db_name, db_user, db_passwd,
                             db_host, db_port, db_table, sql_dir, enable_ssl)
        else:
            return None
    elif db_type == 'XML':
        from xmlmanager import XMLManager
        return XMLManager(cls, db_name, db_user, db_passwd,
                          db_host, db_port, db_table, sql_dir, enable_ssl)
    else:
        raise ValueError, 'Unknown db_type: %s' % db_type
[ "def", "get_manager", "(", "cls", ")", ":", "db_user", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'db_user'", ",", "None", ")", "db_passwd", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'db_passwd'", ",", "None", ")", "db_type", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'db_type'", ",", "'SimpleDB'", ")", "db_name", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'db_name'", ",", "None", ")", "db_table", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'db_table'", ",", "None", ")", "db_host", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'db_host'", ",", "\"sdb.amazonaws.com\"", ")", "db_port", "=", "boto", ".", "config", ".", "getint", "(", "'DB'", ",", "'db_port'", ",", "443", ")", "enable_ssl", "=", "boto", ".", "config", ".", "getbool", "(", "'DB'", ",", "'enable_ssl'", ",", "True", ")", "sql_dir", "=", "boto", ".", "config", ".", "get", "(", "'DB'", ",", "'sql_dir'", ",", "None", ")", "debug", "=", "boto", ".", "config", ".", "getint", "(", "'DB'", ",", "'debug'", ",", "0", ")", "module_name", "=", "cls", ".", "__module__", ".", "replace", "(", "'.'", ",", "'_'", ")", "db_section", "=", "'DB_'", "+", "module_name", "+", "'_'", "+", "cls", ".", "__name__", "if", "not", "boto", ".", "config", ".", "has_section", "(", "db_section", ")", ":", "db_section", "=", "'DB_'", "+", "cls", ".", "__name__", "if", "boto", ".", "config", ".", "has_section", "(", "db_section", ")", ":", "db_user", "=", "boto", ".", "config", ".", "get", "(", "db_section", ",", "'db_user'", ",", "db_user", ")", "db_passwd", "=", "boto", ".", "config", ".", "get", "(", "db_section", ",", "'db_passwd'", ",", "db_passwd", ")", "db_type", "=", "boto", ".", "config", ".", "get", "(", "db_section", ",", "'db_type'", ",", "db_type", ")", "db_name", "=", "boto", ".", "config", ".", "get", "(", "db_section", ",", "'db_name'", ",", "db_name", ")", "db_table", "=", "boto", ".", "config", ".", "get", "(", "db_section", ",", "'db_table'", ",", "db_table", ")", "db_host", "=", "boto", ".", "config", ".", "get", "(", "db_section", ",", "'db_host'", ",", "db_host", ")", "db_port", "=", "boto", ".", "config", ".", "getint", "(", "db_section", ",", "'db_port'", ",", "db_port", ")", "enable_ssl", "=", "boto", ".", "config", ".", "getint", "(", "db_section", ",", "'enable_ssl'", ",", "enable_ssl", ")", "debug", "=", "boto", ".", "config", ".", "getint", "(", "db_section", ",", "'debug'", ",", "debug", ")", "elif", "hasattr", "(", "cls", ",", "\"_db_name\"", ")", "and", "cls", ".", "_db_name", "is", "not", "None", ":", "db_name", "=", "cls", ".", "_db_name", "elif", "hasattr", "(", "cls", ".", "__bases__", "[", "0", "]", ",", "\"_manager\"", ")", ":", "return", "cls", ".", "__bases__", "[", "0", "]", ".", "_manager", "if", "db_type", "==", "'SimpleDB'", ":", "from", "sdbmanager", "import", "SDBManager", "return", "SDBManager", "(", "cls", ",", "db_name", ",", "db_user", ",", "db_passwd", ",", "db_host", ",", "db_port", ",", "db_table", ",", "sql_dir", ",", "enable_ssl", ")", "elif", "db_type", "==", "'PostgreSQL'", ":", "from", "pgmanager", "import", "PGManager", "if", "db_table", ":", "return", "PGManager", "(", "cls", ",", "db_name", ",", "db_user", ",", "db_passwd", ",", "db_host", ",", "db_port", ",", "db_table", ",", "sql_dir", ",", "enable_ssl", ")", "else", ":", "return", "None", "elif", "db_type", "==", "'XML'", ":", "from", "xmlmanager", "import", "XMLManager", "return", "XMLManager", "(", "cls", ",", "db_name", ",", "db_user", ",", "db_passwd", 
",", "db_host", ",", "db_port", ",", "db_table", ",", "sql_dir", ",", "enable_ssl", ")", "else", ":", "raise", "ValueError", ",", "'Unknown db_type: %s'", "%", "db_type" ]
python
Returns the appropriate Manager class for a given Model class. It does this by looking in the boto config for a section like this:: [DB] db_type = SimpleDB db_user = <aws access key id> db_passwd = <aws secret access key> db_name = my_domain [DB_TestBasic] db_type = SimpleDB db_user = <another aws access key id> db_passwd = <another aws secret access key> db_name = basic_domain db_port = 1111 The values in the DB section are "generic values" that will be used if nothing more specific is found. You can also create a section for a specific Model class that gives the db info for that class. In the example above, TestBasic is a Model subclass.
true
2,528,592
def GetValidHostsForCert(cert): """Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs. """ if 'subjectAltName' in cert: return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns'] else: return [x[0][1] for x in cert['subject'] if x[0][0].lower() == 'commonname']
[ "def", "GetValidHostsForCert", "(", "cert", ")", ":", "if", "'subjectAltName'", "in", "cert", ":", "return", "[", "x", "[", "1", "]", "for", "x", "in", "cert", "[", "'subjectAltName'", "]", "if", "x", "[", "0", "]", ".", "lower", "(", ")", "==", "'dns'", "]", "else", ":", "return", "[", "x", "[", "0", "]", "[", "1", "]", "for", "x", "in", "cert", "[", "'subject'", "]", "if", "x", "[", "0", "]", "[", "0", "]", ".", "lower", "(", ")", "==", "'commonname'", "]" ]
python
Returns a list of valid host globs for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. Returns: list: A list of valid host globs.
true
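A quick usage sketch with hypothetical certificate dictionaries in the shape returned by Python's ``ssl.SSLSocket.getpeercert()``; subjectAltName entries win when present, otherwise the commonName is used.

.. code-block:: python

    # Hypothetical cert dicts shaped like ssl.getpeercert() output.
    cert_with_san = {
        'subject': ((('commonName', 'example.com'),),),
        'subjectAltName': (('DNS', 'example.com'), ('DNS', '*.example.com')),
    }
    print(GetValidHostsForCert(cert_with_san))     # ['example.com', '*.example.com']

    cert_without_san = {'subject': ((('commonName', 'example.com'),),)}
    print(GetValidHostsForCert(cert_without_san))  # ['example.com']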
2,528,593
def ValidateCertificateHostname(cert, hostname): """Validates that a given hostname is valid for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. hostname: The hostname to test. Returns: bool: Whether or not the hostname is valid for this certificate. """ hosts = GetValidHostsForCert(cert) boto.log.debug( "validating server certificate: hostname=%s, certificate hosts=%s", hostname, hosts) for host in hosts: host_re = host.replace('.', '\.').replace('*', '[^.]*') if re.search('^%s$' % (host_re,), hostname, re.I): return True return False
[ "def", "ValidateCertificateHostname", "(", "cert", ",", "hostname", ")", ":", "hosts", "=", "GetValidHostsForCert", "(", "cert", ")", "boto", ".", "log", ".", "debug", "(", "\"validating server certificate: hostname=%s, certificate hosts=%s\"", ",", "hostname", ",", "hosts", ")", "for", "host", "in", "hosts", ":", "host_re", "=", "host", ".", "replace", "(", "'.'", ",", "'\\.'", ")", ".", "replace", "(", "'*'", ",", "'[^.]*'", ")", "if", "re", ".", "search", "(", "'^%s$'", "%", "(", "host_re", ",", ")", ",", "hostname", ",", "re", ".", "I", ")", ":", "return", "True", "return", "False" ]
python
Validates that a given hostname is valid for an SSL certificate. Args: cert: A dictionary representing an SSL certificate. hostname: The hostname to test. Returns: bool: Whether or not the hostname is valid for this certificate.
true
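A small sketch of the wildcard matching (made-up certificate and hostnames, assuming ``re`` and ``boto`` are importable as in the snippet's module). Note that ``*`` is rewritten to ``[^.]*``, so it matches exactly one DNS label:

.. code-block:: python

    cert = {'subjectAltName': (('DNS', '*.example.com'),)}
    print(ValidateCertificateHostname(cert, 'www.example.com'))   # True
    print(ValidateCertificateHostname(cert, 'a.b.example.com'))   # False: '*' spans one label only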
2,528,724
def connect_to_region(region_name, **kw_params): """ Given a valid region name, return a :class:`boto.sns.connection.SNSConnection`. :type region_name: str :param region_name: The name of the region to connect to. :rtype: :class:`boto.sns.connection.SNSConnection` or ``None`` :return: A connection to the given region, or None if an invalid region name is given """ for region in regions(): if region.name == region_name: return region.connect(**kw_params) return None
[ "def", "connect_to_region", "(", "region_name", ",", "**", "kw_params", ")", ":", "for", "region", "in", "regions", "(", ")", ":", "if", "region", ".", "name", "==", "region_name", ":", "return", "region", ".", "connect", "(", "**", "kw_params", ")", "return", "None" ]
python
Given a valid region name, return a :class:`boto.sns.connection.SNSConnection`. :type region_name: str :param region_name: The name of the region to connect to. :rtype: :class:`boto.sns.connection.SNSConnection` or ``None`` :return: A connection to the given region, or None if an invalid region name is given
true
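Typical use, assuming AWS credentials are available via the environment or a boto config file (the region name is illustrative):

.. code-block:: python

    import boto.sns

    conn = boto.sns.connect_to_region('us-east-1')
    if conn is None:
        raise ValueError('Unknown region name')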
2,528,725
def get_region(region_name, **kw_params): """ Find and return a :class:`boto.regioninfo.RegionInfo` object given a region name. :type region_name: str :param region_name: The name of the region. :rtype: :class:`boto.regioninfo.RegionInfo` :return: The RegionInfo object for the given region or None if an invalid region name is provided. """ for region in regions(**kw_params): if region.name == region_name: return region return None
[ "def", "get_region", "(", "region_name", ",", "**", "kw_params", ")", ":", "for", "region", "in", "regions", "(", "**", "kw_params", ")", ":", "if", "region", ".", "name", "==", "region_name", ":", "return", "region", "return", "None" ]
python
Find and return a :class:`boto.regioninfo.RegionInfo` object given a region name. :type region_name: str :param region_name: The name of the region. :rtype: :class:`boto.regioninfo.RegionInfo` :return: The RegionInfo object for the given region or None if an invalid region name is provided.
true
2,529,695
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None): """ A generator function for listing keys in a bucket. """ more_results = True k = None while more_results: rs = bucket.get_all_keys(prefix=prefix, marker=marker, delimiter=delimiter, headers=headers) for k in rs: yield k if k: marker = rs.next_marker or k.name more_results= rs.is_truncated
[ "def", "bucket_lister", "(", "bucket", ",", "prefix", "=", "''", ",", "delimiter", "=", "''", ",", "marker", "=", "''", ",", "headers", "=", "None", ")", ":", "more_results", "=", "True", "k", "=", "None", "while", "more_results", ":", "rs", "=", "bucket", ".", "get_all_keys", "(", "prefix", "=", "prefix", ",", "marker", "=", "marker", ",", "delimiter", "=", "delimiter", ",", "headers", "=", "headers", ")", "for", "k", "in", "rs", ":", "yield", "k", "if", "k", ":", "marker", "=", "rs", ".", "next_marker", "or", "k", ".", "name", "more_results", "=", "rs", ".", "is_truncated" ]
python
A generator function for listing keys in a bucket.
true
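In boto this generator backs ``bucket.list()``; a direct-use sketch, assuming valid credentials and a hypothetical bucket name:

.. code-block:: python

    import boto

    conn = boto.connect_s3()                  # credentials from env or boto config
    bucket = conn.get_bucket('my-bucket')     # hypothetical bucket
    for key in bucket_lister(bucket, prefix='logs/'):
        print(key.name)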
2,529,697
def multipart_upload_lister(bucket, key_marker='', upload_id_marker='', headers=None): """ A generator function for listing multipart uploads in a bucket. """ more_results = True k = None while more_results: rs = bucket.get_all_multipart_uploads(key_marker=key_marker, upload_id_marker=upload_id_marker, headers=headers) for k in rs: yield k key_marker = rs.next_key_marker upload_id_marker = rs.next_upload_id_marker more_results= rs.is_truncated
[ "def", "multipart_upload_lister", "(", "bucket", ",", "key_marker", "=", "''", ",", "upload_id_marker", "=", "''", ",", "headers", "=", "None", ")", ":", "more_results", "=", "True", "k", "=", "None", "while", "more_results", ":", "rs", "=", "bucket", ".", "get_all_multipart_uploads", "(", "key_marker", "=", "key_marker", ",", "upload_id_marker", "=", "upload_id_marker", ",", "headers", "=", "headers", ")", "for", "k", "in", "rs", ":", "yield", "k", "key_marker", "=", "rs", ".", "next_key_marker", "upload_id_marker", "=", "rs", ".", "next_upload_id_marker", "more_results", "=", "rs", ".", "is_truncated" ]
python
A generator function for listing multipart uploads in a bucket.
true
2,529,954
def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`botornado.sqs.connection.AsyncSQSConnection` :return: An asynchronous connection to Amazon's SQS """ from botornado.sqs.connection import AsyncSQSConnection return AsyncSQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
[ "def", "connect_sqs", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "**", "kwargs", ")", ":", "from", "botornado", ".", "sqs", ".", "connection", "import", "AsyncSQSConnection", "return", "AsyncSQSConnection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "**", "kwargs", ")" ]
python
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`botornado.sqs.connection.AsyncSQSConnection` :return: An asynchronous connection to Amazon's SQS
true
2,530,001
def part_lister(mpupload, part_number_marker=None): """ A generator function for listing parts of a multipart upload. """ more_results = True part = None while more_results: parts = mpupload.get_all_parts(None, part_number_marker) for part in parts: yield part part_number_marker = mpupload.next_part_number_marker more_results= mpupload.is_truncated
[ "def", "part_lister", "(", "mpupload", ",", "part_number_marker", "=", "None", ")", ":", "more_results", "=", "True", "part", "=", "None", "while", "more_results", ":", "parts", "=", "mpupload", ".", "get_all_parts", "(", "None", ",", "part_number_marker", ")", "for", "part", "in", "parts", ":", "yield", "part", "part_number_marker", "=", "mpupload", ".", "next_part_number_marker", "more_results", "=", "mpupload", ".", "is_truncated" ]
python
A generator function for listing parts of a multipart upload.
true
2,530,068
def sshclient_from_instance(instance, ssh_key_file, host_key_file='~/.ssh/known_hosts', user_name='root', ssh_pwd=None): """ Create and return an SSHClient object given an instance object. :type instance: :class:`boto.ec2.instance.Instance` object :param instance: The instance object. :type ssh_key_file: str :param ssh_key_file: A path to the private key file used to log into instance. :type host_key_file: str :param host_key_file: A path to the known_hosts file used by the SSH client. Defaults to ~/.ssh/known_hosts :type user_name: str :param user_name: The username to use when logging into the instance. Defaults to root. :type ssh_pwd: str :param ssh_pwd: The passphrase, if any, associated with private key. """ s = FakeServer(instance, ssh_key_file) return SSHClient(s, host_key_file, user_name, ssh_pwd)
[ "def", "sshclient_from_instance", "(", "instance", ",", "ssh_key_file", ",", "host_key_file", "=", "'~/.ssh/known_hosts'", ",", "user_name", "=", "'root'", ",", "ssh_pwd", "=", "None", ")", ":", "s", "=", "FakeServer", "(", "instance", ",", "ssh_key_file", ")", "return", "SSHClient", "(", "s", ",", "host_key_file", ",", "user_name", ",", "ssh_pwd", ")" ]
python
Create and return an SSHClient object given an instance object. :type instance: :class:`boto.ec2.instance.Instance` object :param instance: The instance object. :type ssh_key_file: str :param ssh_key_file: A path to the private key file used to log into instance. :type host_key_file: str :param host_key_file: A path to the known_hosts file used by the SSH client. Defaults to ~/.ssh/known_hosts :type user_name: str :param user_name: The username to use when logging into the instance. Defaults to root. :type ssh_pwd: str :param ssh_pwd: The passphrase, if any, associated with private key.
true
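A usage sketch with a made-up instance id, key path, and user name; the exact return shape of ``SSHClient.run`` can vary across boto versions:

.. code-block:: python

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    reservations = conn.get_all_instances(instance_ids=['i-12345678'])  # hypothetical id
    instance = reservations[0].instances[0]
    ssh = sshclient_from_instance(instance, '/path/to/key.pem', user_name='ec2-user')
    status, stdout, stderr = ssh.run('uptime')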
2,530,500
def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to Amazon's S3 """ from boto.s3.connection import S3Connection return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
[ "def", "connect_s3", "(", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "s3", ".", "connection", "import", "S3Connection", "return", "S3Connection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "**", "kwargs", ")" ]
python
:type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to Amazon's S3
true
2,530,517
def connect_euca(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Eucalyptus', is_secure=False, **kwargs): """ Connect to a Eucalyptus service. :type host: string :param host: the host name or ip address of the Eucalyptus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to Eucalyptus server """ from boto.ec2 import EC2Connection from boto.ec2.regioninfo import RegionInfo # Check for values in boto config, if not supplied as args if not aws_access_key_id: aws_access_key_id = config.get('Credentials', 'euca_access_key_id', None) if not aws_secret_access_key: aws_secret_access_key = config.get('Credentials', 'euca_secret_access_key', None) if not host: host = config.get('Boto', 'eucalyptus_host', None) reg = RegionInfo(name='eucalyptus', endpoint=host) return EC2Connection(aws_access_key_id, aws_secret_access_key, region=reg, port=port, path=path, is_secure=is_secure, **kwargs)
[ "def", "connect_euca", "(", "host", "=", "None", ",", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "port", "=", "8773", ",", "path", "=", "'/services/Eucalyptus'", ",", "is_secure", "=", "False", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "ec2", "import", "EC2Connection", "from", "boto", ".", "ec2", ".", "regioninfo", "import", "RegionInfo", "if", "not", "aws_access_key_id", ":", "aws_access_key_id", "=", "config", ".", "get", "(", "'Credentials'", ",", "'euca_access_key_id'", ",", "None", ")", "if", "not", "aws_secret_access_key", ":", "aws_secret_access_key", "=", "config", ".", "get", "(", "'Credentials'", ",", "'euca_secret_access_key'", ",", "None", ")", "if", "not", "host", ":", "host", "=", "config", ".", "get", "(", "'Boto'", ",", "'eucalyptus_host'", ",", "None", ")", "reg", "=", "RegionInfo", "(", "name", "=", "'eucalyptus'", ",", "endpoint", "=", "host", ")", "return", "EC2Connection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "region", "=", "reg", ",", "port", "=", "port", ",", "path", "=", "path", ",", "is_secure", "=", "is_secure", ",", "**", "kwargs", ")" ]
python
Connect to a Eucalyptus service. :type host: string :param host: the host name or ip address of the Eucalyptus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to Eucalyptus server
true
2,530,518
def connect_ec2_endpoint(url, aws_access_key_id=None, aws_secret_access_key=None, **kwargs): """ Connect to an EC2 API endpoint. Additional arguments are passed through to connect_ec2. :type url: string :param url: A url for the ec2 api endpoint to connect to :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to the given EC2 API endpoint """ from boto.ec2.regioninfo import RegionInfo purl = urlparse.urlparse(url) kwargs['port'] = purl.port kwargs['host'] = purl.hostname kwargs['path'] = purl.path if not 'is_secure' in kwargs: kwargs['is_secure'] = (purl.scheme == "https") kwargs['region'] = RegionInfo(name = purl.hostname, endpoint = purl.hostname) kwargs['aws_access_key_id']=aws_access_key_id kwargs['aws_secret_access_key']=aws_secret_access_key return(connect_ec2(**kwargs))
[ "def", "connect_ec2_endpoint", "(", "url", ",", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "ec2", ".", "regioninfo", "import", "RegionInfo", "purl", "=", "urlparse", ".", "urlparse", "(", "url", ")", "kwargs", "[", "'port'", "]", "=", "purl", ".", "port", "kwargs", "[", "'host'", "]", "=", "purl", ".", "hostname", "kwargs", "[", "'path'", "]", "=", "purl", ".", "path", "if", "not", "'is_secure'", "in", "kwargs", ":", "kwargs", "[", "'is_secure'", "]", "=", "(", "purl", ".", "scheme", "==", "\"https\"", ")", "kwargs", "[", "'region'", "]", "=", "RegionInfo", "(", "name", "=", "purl", ".", "hostname", ",", "endpoint", "=", "purl", ".", "hostname", ")", "kwargs", "[", "'aws_access_key_id'", "]", "=", "aws_access_key_id", "kwargs", "[", "'aws_secret_access_key'", "]", "=", "aws_secret_access_key", "return", "(", "connect_ec2", "(", "**", "kwargs", ")", ")" ]
python
Connect to an EC2 API endpoint. Additional arguments are passed through to connect_ec2. :type url: string :param url: A url for the ec2 api endpoint to connect to :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.ec2.connection.EC2Connection` :return: A connection to the given EC2 API endpoint
true
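An illustrative call with a made-up endpoint URL and placeholder credentials; the port, host, path, and scheme are all derived from the URL:

.. code-block:: python

    conn = connect_ec2_endpoint(
        'https://ec2.internal.example.com:8773/services/Eucalyptus',
        aws_access_key_id='AKIA...',     # placeholder
        aws_secret_access_key='...')     # placeholder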
2,530,519
def connect_walrus(host=None, aws_access_key_id=None, aws_secret_access_key=None, port=8773, path='/services/Walrus', is_secure=False, **kwargs): """ Connect to a Walrus service. :type host: string :param host: the host name or ip address of the Walrus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to Walrus """ from boto.s3.connection import S3Connection from boto.s3.connection import OrdinaryCallingFormat # Check for values in boto config, if not supplied as args if not aws_access_key_id: aws_access_key_id = config.get('Credentials', 'euca_access_key_id', None) if not aws_secret_access_key: aws_secret_access_key = config.get('Credentials', 'euca_secret_access_key', None) if not host: host = config.get('Boto', 'walrus_host', None) return S3Connection(aws_access_key_id, aws_secret_access_key, host=host, port=port, path=path, calling_format=OrdinaryCallingFormat(), is_secure=is_secure, **kwargs)
[ "def", "connect_walrus", "(", "host", "=", "None", ",", "aws_access_key_id", "=", "None", ",", "aws_secret_access_key", "=", "None", ",", "port", "=", "8773", ",", "path", "=", "'/services/Walrus'", ",", "is_secure", "=", "False", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "s3", ".", "connection", "import", "S3Connection", "from", "boto", ".", "s3", ".", "connection", "import", "OrdinaryCallingFormat", "if", "not", "aws_access_key_id", ":", "aws_access_key_id", "=", "config", ".", "get", "(", "'Credentials'", ",", "'euca_access_key_id'", ",", "None", ")", "if", "not", "aws_secret_access_key", ":", "aws_secret_access_key", "=", "config", ".", "get", "(", "'Credentials'", ",", "'euca_secret_access_key'", ",", "None", ")", "if", "not", "host", ":", "host", "=", "config", ".", "get", "(", "'Boto'", ",", "'walrus_host'", ",", "None", ")", "return", "S3Connection", "(", "aws_access_key_id", ",", "aws_secret_access_key", ",", "host", "=", "host", ",", "port", "=", "port", ",", "path", "=", "path", ",", "calling_format", "=", "OrdinaryCallingFormat", "(", ")", ",", "is_secure", "=", "is_secure", ",", "**", "kwargs", ")" ]
python
Connect to a Walrus service. :type host: string :param host: the host name or ip address of the Walrus server :type aws_access_key_id: string :param aws_access_key_id: Your AWS Access Key ID :type aws_secret_access_key: string :param aws_secret_access_key: Your AWS Secret Access Key :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to Walrus
true
2,530,522
def connect_ia(ia_access_key_id=None, ia_secret_access_key=None, is_secure=False, **kwargs): """ Connect to the Internet Archive via their S3-like API. :type ia_access_key_id: string :param ia_access_key_id: Your IA Access Key ID. This will also look in your boto config file for an entry in the Credentials section called "ia_access_key_id" :type ia_secret_access_key: string :param ia_secret_access_key: Your IA Secret Access Key. This will also look in your boto config file for an entry in the Credentials section called "ia_secret_access_key" :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to the Internet Archive """ from boto.s3.connection import S3Connection from boto.s3.connection import OrdinaryCallingFormat access_key = config.get('Credentials', 'ia_access_key_id', ia_access_key_id) secret_key = config.get('Credentials', 'ia_secret_access_key', ia_secret_access_key) return S3Connection(access_key, secret_key, host='s3.us.archive.org', calling_format=OrdinaryCallingFormat(), is_secure=is_secure, **kwargs)
[ "def", "connect_ia", "(", "ia_access_key_id", "=", "None", ",", "ia_secret_access_key", "=", "None", ",", "is_secure", "=", "False", ",", "**", "kwargs", ")", ":", "from", "boto", ".", "s3", ".", "connection", "import", "S3Connection", "from", "boto", ".", "s3", ".", "connection", "import", "OrdinaryCallingFormat", "access_key", "=", "config", ".", "get", "(", "'Credentials'", ",", "'ia_access_key_id'", ",", "ia_access_key_id", ")", "secret_key", "=", "config", ".", "get", "(", "'Credentials'", ",", "'ia_secret_access_key'", ",", "ia_secret_access_key", ")", "return", "S3Connection", "(", "access_key", ",", "secret_key", ",", "host", "=", "'s3.us.archive.org'", ",", "calling_format", "=", "OrdinaryCallingFormat", "(", ")", ",", "is_secure", "=", "is_secure", ",", "**", "kwargs", ")" ]
python
Connect to the Internet Archive via their S3-like API. :type ia_access_key_id: string :param ia_access_key_id: Your IA Access Key ID. This will also look in your boto config file for an entry in the Credentials section called "ia_access_key_id" :type ia_secret_access_key: string :param ia_secret_access_key: Your IA Secret Access Key. This will also look in your boto config file for an entry in the Credentials section called "ia_secret_access_key" :rtype: :class:`boto.s3.connection.S3Connection` :return: A connection to the Internet Archive
true
2,530,528
def storage_uri_for_key(key): """Returns a StorageUri for the given key. :type key: :class:`boto.s3.key.Key` or subclass :param key: The key from which to build the StorageUri (bucket + object name). """ if not isinstance(key, boto.s3.key.Key): raise InvalidUriError('Requested key (%s) is not a subclass of ' 'boto.s3.key.Key' % str(type(key))) prov_name = key.bucket.connection.provider.get_provider_name() uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name) return storage_uri(uri_str)
[ "def", "storage_uri_for_key", "(", "key", ")", ":", "if", "not", "isinstance", "(", "key", ",", "boto", ".", "s3", ".", "key", ".", "Key", ")", ":", "raise", "InvalidUriError", "(", "'Requested key (%s) is not a subclass of '", "'boto.s3.key.Key'", "%", "str", "(", "type", "(", "key", ")", ")", ")", "prov_name", "=", "key", ".", "bucket", ".", "connection", ".", "provider", ".", "get_provider_name", "(", ")", "uri_str", "=", "'%s://%s/%s'", "%", "(", "prov_name", ",", "key", ".", "bucket", ".", "name", ",", "key", ".", "name", ")", "return", "storage_uri", "(", "uri_str", ")" ]
python
Returns a StorageUri for the given key. :type key: :class:`boto.s3.key.Key` or subclass :param key: The key from which to build the StorageUri (bucket + object name).
true
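A sketch of the round trip from an S3 key back to a URI (bucket and key names are hypothetical; requires valid credentials):

.. code-block:: python

    import boto

    bucket = boto.connect_s3().get_bucket('my-bucket')
    key = bucket.get_key('data/file.txt')
    uri = storage_uri_for_key(key)
    print(uri)    # e.g. s3://my-bucket/data/file.txt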
2,530,616
def guess_mime_type(content, deftype): """Guess the mime type of a block of text. :param content: content we're finding the type of :type content: str :param deftype: Default mime type :type deftype: str :rtype: str :return: The mime type recognized in the content, or deftype if none matches """ #Mappings recognized by cloudinit starts_with_mappings={ '#include' : 'text/x-include-url', '#!' : 'text/x-shellscript', '#cloud-config' : 'text/cloud-config', '#upstart-job' : 'text/upstart-job', '#part-handler' : 'text/part-handler', '#cloud-boothook' : 'text/cloud-boothook' } rtype = deftype for possible_type,mimetype in starts_with_mappings.items(): if content.startswith(possible_type): rtype = mimetype break return(rtype)
[ "def", "guess_mime_type", "(", "content", ",", "deftype", ")", ":", "starts_with_mappings", "=", "{", "'#include'", ":", "'text/x-include-url'", ",", "'#!'", ":", "'text/x-shellscript'", ",", "'#cloud-config'", ":", "'text/cloud-config'", ",", "'#upstart-job'", ":", "'text/upstart-job'", ",", "'#part-handler'", ":", "'text/part-handler'", ",", "'#cloud-boothook'", ":", "'text/cloud-boothook'", "}", "rtype", "=", "deftype", "for", "possible_type", ",", "mimetype", "in", "starts_with_mappings", ".", "items", "(", ")", ":", "if", "content", ".", "startswith", "(", "possible_type", ")", ":", "rtype", "=", "mimetype", "break", "return", "(", "rtype", ")" ]
python
Guess the mime type of a block of text. :param content: content we're finding the type of :type content: str :param deftype: Default mime type :type deftype: str :rtype: str :return: The mime type recognized in the content, or deftype if none matches
true
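Two illustrative calls: a cloud-init shebang is recognized by its prefix, while unrecognized content falls back to the supplied default:

.. code-block:: python

    print(guess_mime_type('#!/bin/bash\necho hi', 'text/plain'))  # text/x-shellscript
    print(guess_mime_type('just some notes', 'text/plain'))       # text/plain (default)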
2,530,990
def item_object_hook(dct): """ A custom object hook for use when decoding JSON item bodies. This hook will transform Amazon DynamoDB JSON responses to something that maps directly to native Python types. """ if len(dct.keys()) > 1: return dct if 'S' in dct: return dct['S'] if 'N' in dct: return convert_num(dct['N']) if 'SS' in dct: return set(dct['SS']) if 'NS' in dct: return set(map(convert_num, dct['NS'])) return dct
[ "def", "item_object_hook", "(", "dct", ")", ":", "if", "len", "(", "dct", ".", "keys", "(", ")", ")", ">", "1", ":", "return", "dct", "if", "'S'", "in", "dct", ":", "return", "dct", "[", "'S'", "]", "if", "'N'", "in", "dct", ":", "return", "convert_num", "(", "dct", "[", "'N'", "]", ")", "if", "'SS'", "in", "dct", ":", "return", "set", "(", "dct", "[", "'SS'", "]", ")", "if", "'NS'", "in", "dct", ":", "return", "set", "(", "map", "(", "convert_num", ",", "dct", "[", "'NS'", "]", ")", ")", "return", "dct" ]
python
A custom object hook for use when decoding JSON item bodies. This hook will transform Amazon DynamoDB JSON responses to something that maps directly to native Python types.
true
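A sketch of plugging the hook into ``json.loads``; ``convert_num`` is boto's helper for turning DynamoDB number strings into ints/floats, approximated here with a minimal stand-in:

.. code-block:: python

    import json

    def convert_num(s):
        # rough stand-in for boto's convert_num helper
        return float(s) if '.' in s else int(s)

    doc = '{"name": {"S": "widget"}, "price": {"N": "9.99"}, "tags": {"SS": ["a", "b"]}}'
    print(json.loads(doc, object_hook=item_object_hook))
    # {'name': 'widget', 'price': 9.99, 'tags': {'a', 'b'}} (set order may differ)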
2,532,723
def header_expand(headers): """Returns an HTTP Header value string from a dictionary. Example expansion:: {'text/x-dvi': {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}, 'text/x-c': {}} # Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c (('text/x-dvi', {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}), ('text/x-c', {})) # Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c """ collector = [] if isinstance(headers, dict): headers = headers.items() elif isinstance(headers, basestring): return headers for i, (value, params) in enumerate(headers): _params = [] for (p_k, p_v) in params.items(): _params.append('%s=%s' % (p_k, p_v)) collector.append(value) collector.append('; ') if len(params): collector.append('; '.join(_params)) if not len(headers) == i+1: collector.append(', ') # Remove trailing separators. if collector[-1] in (', ', '; '): del collector[-1] return ''.join(collector)
[ "def", "header_expand", "(", "headers", ")", ":", "collector", "=", "[", "]", "if", "isinstance", "(", "headers", ",", "dict", ")", ":", "headers", "=", "headers", ".", "items", "(", ")", "elif", "isinstance", "(", "headers", ",", "basestring", ")", ":", "return", "headers", "for", "i", ",", "(", "value", ",", "params", ")", "in", "enumerate", "(", "headers", ")", ":", "_params", "=", "[", "]", "for", "(", "p_k", ",", "p_v", ")", "in", "params", ".", "items", "(", ")", ":", "_params", ".", "append", "(", "'%s=%s'", "%", "(", "p_k", ",", "p_v", ")", ")", "collector", ".", "append", "(", "value", ")", "collector", ".", "append", "(", "'; '", ")", "if", "len", "(", "params", ")", ":", "collector", ".", "append", "(", "'; '", ".", "join", "(", "_params", ")", ")", "if", "not", "len", "(", "headers", ")", "==", "i", "+", "1", ":", "collector", ".", "append", "(", "', '", ")", "if", "collector", "[", "-", "1", "]", "in", "(", "', '", ",", "'; '", ")", ":", "del", "collector", "[", "-", "1", "]", "return", "''", ".", "join", "(", "collector", ")" ]
python
Returns an HTTP Header value string from a dictionary. Example expansion:: {'text/x-dvi': {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}, 'text/x-c': {}} # Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c (('text/x-dvi', {'q': '.8', 'mxb': '100000', 'mxt': '5.0'}), ('text/x-c', {})) # Accept: text/x-dvi; q=.8; mxb=100000; mxt=5.0, text/x-c
true
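A worked call mirroring the docstring's example (the snippet is Python 2 era because of ``basestring``, but the dict branch behaves the same on Python 3):

.. code-block:: python

    print(header_expand({'text/x-dvi': {'q': '.8'}, 'text/x-c': {}}))
    # text/x-dvi; q=.8, text/x-c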
2,532,725
def dict_from_cookiejar(cj): """Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from. """ cookie_dict = {} for _, cookies in cj._cookies.items(): for _, cookies in cookies.items(): for cookie in cookies.values(): # print cookie cookie_dict[cookie.name] = cookie.value return cookie_dict
[ "def", "dict_from_cookiejar", "(", "cj", ")", ":", "cookie_dict", "=", "{", "}", "for", "_", ",", "cookies", "in", "cj", ".", "_cookies", ".", "items", "(", ")", ":", "for", "_", ",", "cookies", "in", "cookies", ".", "items", "(", ")", ":", "for", "cookie", "in", "cookies", ".", "values", "(", ")", ":", "cookie_dict", "[", "cookie", ".", "name", "]", "=", "cookie", ".", "value", "return", "cookie_dict" ]
python
Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from.
true
2,532,728
def get_encodings_from_content(content): """Returns encodings from given content string. :param content: bytestring to extract encodings from. """ charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) return charset_re.findall(content)
[ "def", "get_encodings_from_content", "(", "content", ")", ":", "charset_re", "=", "re", ".", "compile", "(", "r'<meta.*?charset=[\"\\']*(.+?)[\"\\'>]'", ",", "flags", "=", "re", ".", "I", ")", "return", "charset_re", ".", "findall", "(", "content", ")" ]
python
Returns encodings from given content string. :param content: bytestring to extract encodings from.
true
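A minimal call on an HTML fragment; the regex simply scrapes ``charset=`` out of any ``<meta>`` tag:

.. code-block:: python

    html = '<html><head><meta charset="utf-8"></head><body></body></html>'
    print(get_encodings_from_content(html))   # ['utf-8']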
2,532,729
def get_encoding_from_headers(headers): """Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. """ content_type = headers.get('content-type') if not content_type: return None content_type, params = cgi.parse_header(content_type) if 'charset' in params: return params['charset'].strip("'\"")
[ "def", "get_encoding_from_headers", "(", "headers", ")", ":", "content_type", "=", "headers", ".", "get", "(", "'content-type'", ")", "if", "not", "content_type", ":", "return", "None", "content_type", ",", "params", "=", "cgi", ".", "parse_header", "(", "content_type", ")", "if", "'charset'", "in", "params", ":", "return", "params", "[", "'charset'", "]", ".", "strip", "(", "\"'\\\"\"", ")" ]
python
Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from.
true
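Example input and output (the snippet relies on the legacy ``cgi.parse_header``, which the newest Pythons have removed):

.. code-block:: python

    headers = {'content-type': 'text/html; charset=ISO-8859-1'}
    print(get_encoding_from_headers(headers))  # ISO-8859-1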
2,532,731
def stream_decode_response_unicode(iterator, r): """Stream decodes an iterator.""" encoding = get_encoding_from_headers(r.headers) if encoding is None: for item in iterator: yield item return decoder = codecs.getincrementaldecoder(encoding)(errors='replace') for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv rv = decoder.decode(b'', final=True) if rv: yield rv
[ "def", "stream_decode_response_unicode", "(", "iterator", ",", "r", ")", ":", "encoding", "=", "get_encoding_from_headers", "(", "r", ".", "headers", ")", "if", "encoding", "is", "None", ":", "for", "item", "in", "iterator", ":", "yield", "item", "return", "decoder", "=", "codecs", ".", "getincrementaldecoder", "(", "encoding", ")", "(", "errors", "=", "'replace'", ")", "for", "chunk", "in", "iterator", ":", "rv", "=", "decoder", ".", "decode", "(", "chunk", ")", "if", "rv", ":", "yield", "rv", "rv", "=", "decoder", ".", "decode", "(", "''", ",", "final", "=", "True", ")", "if", "rv", ":", "yield", "rv" ]
python
Stream decodes an iterator.
true
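The incremental decoder is what makes this safe for multi-byte characters split across chunks; a sketch with a stub response object, assuming the snippet's own imports (``codecs`` plus ``get_encoding_from_headers`` above) are in place:

.. code-block:: python

    class FakeResponse(object):
        # only .headers is consulted by the decoder
        headers = {'content-type': 'text/plain; charset=utf-8'}

    chunks = [b'caf', b'\xc3', b'\xa9']   # 'caf' + the two bytes of 'é', split apart
    print(''.join(stream_decode_response_unicode(iter(chunks), FakeResponse())))
    # café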
2,533,198
def _get_candidate_names(): """Common setup sequence for all user-callable interfaces.""" global _name_sequence if _name_sequence is None: _once_lock.acquire() try: if _name_sequence is None: _name_sequence = _RandomNameSequence() finally: _once_lock.release() return _name_sequence
[ "def", "_get_candidate_names", "(", ")", ":", "global", "_name_sequence", "if", "_name_sequence", "is", "None", ":", "_once_lock", ".", "acquire", "(", ")", "try", ":", "if", "_name_sequence", "is", "None", ":", "_name_sequence", "=", "_RandomNameSequence", "(", ")", "finally", ":", "_once_lock", ".", "release", "(", ")", "return", "_name_sequence" ]
python
Common setup sequence for all user-callable interfaces.
true
2,533,200
def gettempdir(): """Accessor for tempfile.tempdir.""" global tempdir if tempdir is None: _once_lock.acquire() try: if tempdir is None: tempdir = _get_default_tempdir() finally: _once_lock.release() return tempdir
[ "def", "gettempdir", "(", ")", ":", "global", "tempdir", "if", "tempdir", "is", "None", ":", "_once_lock", ".", "acquire", "(", ")", "try", ":", "if", "tempdir", "is", "None", ":", "tempdir", "=", "_get_default_tempdir", "(", ")", "finally", ":", "_once_lock", ".", "release", "(", ")", "return", "tempdir" ]
python
Accessor for tempfile.tempdir.
true
2,533,202
def mkdtemp(suffix="", prefix=template, dir=None): """User-callable function to create and return a unique temporary directory. The return value is the pathname of the directory. Arguments are as for mkstemp, except that the 'text' argument is not accepted. The directory is readable, writable, and searchable only by the creating user. Caller is responsible for deleting the directory when done with it. """ if dir is None: dir = gettempdir() names = _get_candidate_names() for seq in range(TMP_MAX): name = next(names) file = _os.path.join(dir, prefix + name + suffix) try: _os.mkdir(file, 0o700) return file except FileExistsError: continue # try again raise FileExistsError(_errno.EEXIST, "No usable temporary directory name found")
[ "def", "mkdtemp", "(", "suffix", "=", "\"\"", ",", "prefix", "=", "template", ",", "dir", "=", "None", ")", ":", "if", "dir", "is", "None", ":", "dir", "=", "gettempdir", "(", ")", "names", "=", "_get_candidate_names", "(", ")", "for", "seq", "in", "range", "(", "TMP_MAX", ")", ":", "name", "=", "next", "(", "names", ")", "file", "=", "_os", ".", "path", ".", "join", "(", "dir", ",", "prefix", "+", "name", "+", "suffix", ")", "try", ":", "_os", ".", "mkdir", "(", "file", ",", "0o700", ")", "return", "file", "except", "FileExistsError", ":", "continue", "raise", "FileExistsError", "(", "_errno", ".", "EEXIST", ",", "\"No usable temporary directory name found\"", ")" ]
python
User-callable function to create and return a unique temporary directory. The return value is the pathname of the directory. Arguments are as for mkstemp, except that the 'text' argument is not accepted. The directory is readable, writable, and searchable only by the creating user. Caller is responsible for deleting the directory when done with it.
true
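Typical use of the public API, with the caller cleaning up as the docstring requires:

.. code-block:: python

    import os
    import shutil
    import tempfile

    workdir = tempfile.mkdtemp(prefix='build-')
    try:
        with open(os.path.join(workdir, 'scratch.txt'), 'w') as f:
            f.write('temporary data')
    finally:
        shutil.rmtree(workdir)   # caller is responsible for deletion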
2,535,245
def ex_varassign(name, expr): """Assign an expression into a single variable. The expression may either be an `ast.expr` object or a value to be used as a literal. """ if not isinstance(expr, ast.expr): expr = ex_literal(expr) return ast.Assign([ex_lvalue(name)], expr)
[ "def", "ex_varassign", "(", "name", ",", "expr", ")", ":", "if", "not", "isinstance", "(", "expr", ",", "ast", ".", "expr", ")", ":", "expr", "=", "ex_literal", "(", "expr", ")", "return", "ast", ".", "Assign", "(", "[", "ex_lvalue", "(", "name", ")", "]", ",", "expr", ")" ]
python
Assign an expression into a single variable. The expression may either be an `ast.expr` object or a value to be used as a literal.
true
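A sketch showing the produced AST; ``ex_lvalue`` and ``ex_literal`` are the snippet's companions, approximated here with minimal stand-ins:

.. code-block:: python

    import ast

    def ex_lvalue(name):                  # minimal stand-in
        return ast.Name(id=name, ctx=ast.Store())

    def ex_literal(val):                  # minimal stand-in
        return ast.Constant(val)

    node = ex_varassign('x', 42)
    print(ast.dump(node))
    # Assign(targets=[Name(id='x', ctx=Store())], value=Constant(value=42))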
2,535,246
def ex_call(func, args): """A function-call expression with only positional parameters. The function may be an expression or the name of a function. Each argument may be an expression or a value to be used as a literal. """ if isinstance(func, str): func = ex_rvalue(func) args = list(args) for i in range(len(args)): if not isinstance(args[i], ast.expr): args[i] = ex_literal(args[i]) if sys.version_info[:2] < (3, 5): return ast.Call(func, args, [], None, None) else: return ast.Call(func, args, [])
[ "def", "ex_call", "(", "func", ",", "args", ")", ":", "if", "isinstance", "(", "func", ",", "str", ")", ":", "func", "=", "ex_rvalue", "(", "func", ")", "args", "=", "list", "(", "args", ")", "for", "i", "in", "range", "(", "len", "(", "args", ")", ")", ":", "if", "not", "isinstance", "(", "args", "[", "i", "]", ",", "ast", ".", "expr", ")", ":", "args", "[", "i", "]", "=", "ex_literal", "(", "args", "[", "i", "]", ")", "if", "sys", ".", "version_info", "[", ":", "2", "]", "<", "(", "3", ",", "5", ")", ":", "return", "ast", ".", "Call", "(", "func", ",", "args", ",", "[", "]", ",", "None", ",", "None", ")", "else", ":", "return", "ast", ".", "Call", "(", "func", ",", "args", ",", "[", "]", ")" ]
python
A function-call expression with only positional parameters. The function may be an expression or the name of a function. Each argument may be an expression or a value to be used as a literal.
true
2,535,248
def _parse(template): """Parse a top-level template string Expression. Any extraneous text is considered literal text. """ parser = Parser(template) parser.parse_expression() parts = parser.parts remainder = parser.string[parser.pos:] if remainder: parts.append(remainder) return Expression(parts)
[ "def", "_parse", "(", "template", ")", ":", "parser", "=", "Parser", "(", "template", ")", "parser", ".", "parse_expression", "(", ")", "parts", "=", "parser", ".", "parts", "remainder", "=", "parser", ".", "string", "[", "parser", ".", "pos", ":", "]", "if", "remainder", ":", "parts", ".", "append", "(", "remainder", ")", "return", "Expression", "(", "parts", ")" ]
python
Parse a top-level template string Expression. Any extraneous text is considered literal text.
true
2,535,385
def _comparator(func): """ Decorator for EnumValue rich comparison methods. """ def comparator_wrapper(self, other): try: # [PATCH] The code was originally the following: # # assert self.enumtype == other.enumtype # result = func(self.index, other.index) # # which first statement causes an issue when serializing/unserializing object # from/to memcached using pylibmc, which built a new instance of the # enumeration. Therefore two items are stated different while semantically # the same. # # These two lines are replaced by the following, which relies on the fact that # developers are not likely naming two items of distinct enumerations the same # way, and less likely to compare two items of two distinct enumerations. # # (Daniel CAUNE; daniel.caune@gmail.com; 2012-05-11) result = func(self.key, other.key) except (AssertionError, AttributeError): result = NotImplemented return result comparator_wrapper.__name__ = func.__name__ comparator_wrapper.__doc__ = getattr(float, func.__name__).__doc__ return comparator_wrapper
[ "def", "_comparator", "(", "func", ")", ":", "def", "comparator_wrapper", "(", "self", ",", "other", ")", ":", "try", ":", "result", "=", "func", "(", "self", ".", "key", ",", "other", ".", "key", ")", "except", "(", "AssertionError", ",", "AttributeError", ")", ":", "result", "=", "NotImplemented", "return", "result", "comparator_wrapper", ".", "__name__", "=", "func", ".", "__name__", "comparator_wrapper", ".", "__doc__", "=", "getattr", "(", "float", ",", "func", ".", "__name__", ")", ".", "__doc__", "return", "comparator_wrapper" ]
python
Decorator for EnumValue rich comparison methods.
true
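A sketch of the intended use on an EnumValue-like class; the decorated functions receive the two ``key`` attributes directly, and their names must match ``float`` method names so the docstring lookup in the wrapper succeeds:

.. code-block:: python

    class EnumValue(object):
        def __init__(self, key):
            self.key = key

        @_comparator
        def __eq__(this_key, other_key):
            return this_key == other_key

        @_comparator
        def __lt__(this_key, other_key):
            return this_key < other_key

    print(EnumValue('RED') == EnumValue('RED'))   # True
    print(EnumValue('RED') < EnumValue('BLUE'))   # False ('BLUE' sorts before 'RED')
    print(EnumValue('RED') == 'not an enum')      # False: wrapper returns NotImplemented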