Dataset columns:
id_within_dataset: int64 (values 46 to 2.71M)
snippet: string (lengths 63 to 481k)
tokens: sequence (lengths 20 to 15.6k)
language: string (2 values)
nl: string (lengths 1 to 32.4k)
is_duplicated: bool (2 classes)
2,643,056
def _pipepager(text, cmd, color):
    """Page through text by feeding it to another program.  Invoking a
    pager through this might support colors.
    """
    import subprocess
    env = dict(os.environ)

    # If we're piping to less we might support colors under the
    # condition that
    cmd_detail = cmd.rsplit('/', 1)[-1].split()
    if color is None and cmd_detail[0] == 'less':
        less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:])
        if not less_flags:
            env['LESS'] = '-R'
            color = True
        elif 'r' in less_flags or 'R' in less_flags:
            color = True

    if not color:
        text = strip_ansi(text)

    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         env=env)
    encoding = get_best_encoding(c.stdin)
    try:
        c.stdin.write(text.encode(encoding, 'replace'))
        c.stdin.close()
    except (IOError, KeyboardInterrupt):
        pass

    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
    # search or other commands inside less).
    #
    # That means when the user hits ^C, the parent process (click) terminates,
    # but less is still alive, paging the output and messing up the terminal.
    #
    # If the user wants to make the pager exit on ^C, they should set
    # `LESS='-K'`. It's not our decision to make.
    while True:
        try:
            c.wait()
        except KeyboardInterrupt:
            pass
        else:
            break
[ "def", "_pipepager", "(", "text", ",", "cmd", ",", "color", ")", ":", "import", "subprocess", "env", "=", "dict", "(", "os", ".", "environ", ")", "cmd_detail", "=", "cmd", ".", "rsplit", "(", "'/'", ",", "1", ")", "[", "-", "1", "]", ".", "split", "(", ")", "if", "color", "is", "None", "and", "cmd_detail", "[", "0", "]", "==", "'less'", ":", "less_flags", "=", "os", ".", "environ", ".", "get", "(", "'LESS'", ",", "''", ")", "+", "' '", ".", "join", "(", "cmd_detail", "[", "1", ":", "]", ")", "if", "not", "less_flags", ":", "env", "[", "'LESS'", "]", "=", "'-R'", "color", "=", "True", "elif", "'r'", "in", "less_flags", "or", "'R'", "in", "less_flags", ":", "color", "=", "True", "if", "not", "color", ":", "text", "=", "strip_ansi", "(", "text", ")", "c", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "env", "=", "env", ")", "encoding", "=", "get_best_encoding", "(", "c", ".", "stdin", ")", "try", ":", "c", ".", "stdin", ".", "write", "(", "text", ".", "encode", "(", "encoding", ",", "'replace'", ")", ")", "c", ".", "stdin", ".", "close", "(", ")", "except", "(", "IOError", ",", "KeyboardInterrupt", ")", ":", "pass", "while", "True", ":", "try", ":", "c", ".", "wait", "(", ")", "except", "KeyboardInterrupt", ":", "pass", "else", ":", "break" ]
python
Page through text by feeding it to another program. Invoking a pager through this might support colors.
true
2,643,644
def _make_rewritten_pyc(state, fn, pyc, co):
    """Try to dump rewritten code to *pyc*."""
    if sys.platform.startswith("win"):
        # Windows grants exclusive access to open files and doesn't have atomic
        # rename, so just write into the final file.
        _write_pyc(state, co, fn, pyc)
    else:
        # When not on windows, assume rename is atomic. Dump the code object
        # into a file specific to this process and atomically replace it.
        proc_pyc = pyc + "." + str(os.getpid())
        if _write_pyc(state, co, fn, proc_pyc):
            os.rename(proc_pyc, pyc)
[ "def", "_make_rewritten_pyc", "(", "state", ",", "fn", ",", "pyc", ",", "co", ")", ":", "if", "sys", ".", "platform", ".", "startswith", "(", "\"win\"", ")", ":", "_write_pyc", "(", "state", ",", "co", ",", "fn", ",", "pyc", ")", "else", ":", "proc_pyc", "=", "pyc", "+", "\".\"", "+", "str", "(", "os", ".", "getpid", "(", ")", ")", "if", "_write_pyc", "(", "state", ",", "co", ",", "fn", ",", "proc_pyc", ")", ":", "os", ".", "rename", "(", "proc_pyc", ",", "pyc", ")" ]
python
Try to dump rewritten code to *pyc*.
true
2,643,646
def _saferepr(obj):
    """Get a safe repr of an object for assertion error messages.

    The assertion formatting (util.format_explanation()) requires
    newlines to be escaped since they are a special character for it.
    Normally assertion.util.format_explanation() does this but for a
    custom repr it is possible to contain one of the special escape
    sequences, especially '\n{' and '\n}' are likely to be present in
    JSON reprs.
    """
    repr = py.io.saferepr(obj)
    if py.builtin._istext(repr):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    return repr.replace(t("\n"), t("\\n"))
[ "def", "_saferepr", "(", "obj", ")", ":", "repr", "=", "py", ".", "io", ".", "saferepr", "(", "obj", ")", "if", "py", ".", "builtin", ".", "_istext", "(", "repr", ")", ":", "t", "=", "py", ".", "builtin", ".", "text", "else", ":", "t", "=", "py", ".", "builtin", ".", "bytes", "return", "repr", ".", "replace", "(", "t", "(", "\"\\n\"", ")", ",", "t", "(", "\"\\\\n\"", ")", ")" ]
python
Get a safe repr of an object for assertion error messages. The assertion formatting (util.format_explanation()) requires newlines to be escaped since they are a special character for it. Normally assertion.util.format_explanation() does this but for a custom repr it is possible to contain one of the special escape sequences, especially '\n{' and '\n}' are likely to be present in JSON reprs.
true
2,643,647
def _format_assertmsg(obj):
    """Format the custom assertion message given.

    For strings this simply replaces newlines with '\n~' so that
    util.format_explanation() will preserve them instead of escaping
    newlines.  For other objects py.io.saferepr() is used first.
    """
    # reprlib appears to have a bug which means that if a string
    # contains a newline it gets escaped, however if an object has a
    # .__repr__() which contains newlines it does not get escaped.
    # However in either case we want to preserve the newline.
    if py.builtin._istext(obj) or py.builtin._isbytes(obj):
        s = obj
        is_repr = False
    else:
        s = py.io.saferepr(obj)
        is_repr = True
    if py.builtin._istext(s):
        t = py.builtin.text
    else:
        t = py.builtin.bytes
    s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
    if is_repr:
        s = s.replace(t("\\n"), t("\n~"))
    return s
[ "def", "_format_assertmsg", "(", "obj", ")", ":", "if", "py", ".", "builtin", ".", "_istext", "(", "obj", ")", "or", "py", ".", "builtin", ".", "_isbytes", "(", "obj", ")", ":", "s", "=", "obj", "is_repr", "=", "False", "else", ":", "s", "=", "py", ".", "io", ".", "saferepr", "(", "obj", ")", "is_repr", "=", "True", "if", "py", ".", "builtin", ".", "_istext", "(", "s", ")", ":", "t", "=", "py", ".", "builtin", ".", "text", "else", ":", "t", "=", "py", ".", "builtin", ".", "bytes", "s", "=", "s", ".", "replace", "(", "t", "(", "\"\\n\"", ")", ",", "t", "(", "\"\\n~\"", ")", ")", ".", "replace", "(", "t", "(", "\"%\"", ")", ",", "t", "(", "\"%%\"", ")", ")", "if", "is_repr", ":", "s", "=", "s", ".", "replace", "(", "t", "(", "\"\\\\n\"", ")", ",", "t", "(", "\"\\n~\"", ")", ")", "return", "s" ]
python
Format the custom assertion message given. For strings this simply replaces newlines with '\n~' so that util.format_explanation() will preserve them instead of escaping newlines. For other objects py.io.saferepr() is used first.
true
2,644,409
def _split_explanation(explanation):
    """Return a list of individual lines in the explanation

    This will return a list of lines split on '\n{', '\n}' and '\n~'.
    Any other newlines will be escaped and appear in the line as the
    literal '\n' characters.
    """
    raw_lines = (explanation or u('')).split('\n')
    lines = [raw_lines[0]]
    for l in raw_lines[1:]:
        if l and l[0] in ['{', '}', '~', '>']:
            lines.append(l)
        else:
            lines[-1] += '\\n' + l
    return lines
[ "def", "_split_explanation", "(", "explanation", ")", ":", "raw_lines", "=", "(", "explanation", "or", "u", "(", "''", ")", ")", ".", "split", "(", "'\\n'", ")", "lines", "=", "[", "raw_lines", "[", "0", "]", "]", "for", "l", "in", "raw_lines", "[", "1", ":", "]", ":", "if", "l", "and", "l", "[", "0", "]", "in", "[", "'{'", ",", "'}'", ",", "'~'", ",", "'>'", "]", ":", "lines", ".", "append", "(", "l", ")", "else", ":", "lines", "[", "-", "1", "]", "+=", "'\\\\n'", "+", "l", "return", "lines" ]
python
Return a list of individual lines in the explanation This will return a list of lines split on '\n{', '\n}' and '\n~'. Any other newlines will be escaped and appear in the line as the literal '\n' characters.
true
2,644,589
def parse_datetime(value):
    """Parses a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.

    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = datetime_re.match(value)
    if match:
        kw = match.groupdict()
        if kw['microsecond']:
            kw['microsecond'] = kw['microsecond'].ljust(6, '0')
        tzinfo = kw.pop('tzinfo')
        if tzinfo == 'Z':
            tzinfo = utc
        elif tzinfo is not None:
            offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
            offset = 60 * int(tzinfo[1:3]) + offset_mins
            if tzinfo[0] == '-':
                offset = -offset
            tzinfo = get_fixed_timezone(offset)
        kw = {k: int(v) for k, v in kw.items() if v is not None}
        kw['tzinfo'] = tzinfo
        return datetime.datetime(**kw)
[ "def", "parse_datetime", "(", "value", ")", ":", "match", "=", "datetime_re", ".", "match", "(", "value", ")", "if", "match", ":", "kw", "=", "match", ".", "groupdict", "(", ")", "if", "kw", "[", "'microsecond'", "]", ":", "kw", "[", "'microsecond'", "]", "=", "kw", "[", "'microsecond'", "]", ".", "ljust", "(", "6", ",", "'0'", ")", "tzinfo", "=", "kw", ".", "pop", "(", "'tzinfo'", ")", "if", "tzinfo", "==", "'Z'", ":", "tzinfo", "=", "utc", "elif", "tzinfo", "is", "not", "None", ":", "offset_mins", "=", "int", "(", "tzinfo", "[", "-", "2", ":", "]", ")", "if", "len", "(", "tzinfo", ")", ">", "3", "else", "0", "offset", "=", "60", "*", "int", "(", "tzinfo", "[", "1", ":", "3", "]", ")", "+", "offset_mins", "if", "tzinfo", "[", "0", "]", "==", "'-'", ":", "offset", "=", "-", "offset", "tzinfo", "=", "get_fixed_timezone", "(", "offset", ")", "kw", "=", "{", "k", ":", "int", "(", "v", ")", "for", "k", ",", "v", "in", "kw", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "kw", "[", "'tzinfo'", "]", "=", "tzinfo", "return", "datetime", ".", "datetime", "(", "**", "kw", ")" ]
python
Parses a string and return a datetime.datetime. This function supports time zone offsets. When the input contains one, the output uses a timezone with a fixed offset from UTC. Raises ValueError if the input is well formatted but not a valid datetime. Returns None if the input isn't well formatted.
true
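A minimal usage sketch for the snippet above, assuming the module-level names it relies on (datetime_re, utc, get_fixed_timezone) come from django.utils.dateparse, where this function appears:

from django.utils.dateparse import parse_datetime

dt = parse_datetime('2016-05-03T10:20:30.400-02:30')
print(dt.tzinfo)                          # timezone with a fixed -02:30 offset from UTC
print(parse_datetime('not a timestamp'))  # None: input is not well formatted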
2,645,493
def prompt(text, default=None, hide_input=False,
           confirmation_prompt=False, type=None,
           value_proc=None, prompt_suffix=': ',
           show_default=True, err=False):
    """Prompts a user for input.  This is a convenience function that can
    be used to prompt a user for input later.

    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens.  If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    result = None

    def prompt_func(text):
        f = hide_input and hidden_prompt_func or visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text, nl=False, err=err)
            return f('')
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        value_proc = convert_type(type, default)

    prompt = _build_prompt(text, prompt_suffix, show_default, default)

    while 1:
        while 1:
            value = prompt_func(prompt)
            if value:
                break
            # If a default is set and used, then the confirmation
            # prompt is always skipped because that's the only thing
            # that really makes sense.
            elif default is not None:
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            echo('Error: %s' % e.message, err=err)
            continue
        if not confirmation_prompt:
            return result
        while 1:
            value2 = prompt_func('Repeat for confirmation: ')
            if value2:
                break
        if value == value2:
            return result
        echo('Error: the two entered values do not match', err=err)
[ "def", "prompt", "(", "text", ",", "default", "=", "None", ",", "hide_input", "=", "False", ",", "confirmation_prompt", "=", "False", ",", "type", "=", "None", ",", "value_proc", "=", "None", ",", "prompt_suffix", "=", "': '", ",", "show_default", "=", "True", ",", "err", "=", "False", ")", ":", "result", "=", "None", "def", "prompt_func", "(", "text", ")", ":", "f", "=", "hide_input", "and", "hidden_prompt_func", "or", "visible_prompt_func", "try", ":", "echo", "(", "text", ",", "nl", "=", "False", ",", "err", "=", "err", ")", "return", "f", "(", "''", ")", "except", "(", "KeyboardInterrupt", ",", "EOFError", ")", ":", "if", "hide_input", ":", "echo", "(", "None", ",", "err", "=", "err", ")", "raise", "Abort", "(", ")", "if", "value_proc", "is", "None", ":", "value_proc", "=", "convert_type", "(", "type", ",", "default", ")", "prompt", "=", "_build_prompt", "(", "text", ",", "prompt_suffix", ",", "show_default", ",", "default", ")", "while", "1", ":", "while", "1", ":", "value", "=", "prompt_func", "(", "prompt", ")", "if", "value", ":", "break", "elif", "default", "is", "not", "None", ":", "return", "default", "try", ":", "result", "=", "value_proc", "(", "value", ")", "except", "UsageError", "as", "e", ":", "echo", "(", "'Error: %s'", "%", "e", ".", "message", ",", "err", "=", "err", ")", "continue", "if", "not", "confirmation_prompt", ":", "return", "result", "while", "1", ":", "value2", "=", "prompt_func", "(", "'Repeat for confirmation: '", ")", "if", "value2", ":", "break", "if", "value", "==", "value2", ":", "return", "result", "echo", "(", "'Error: the two entered values do not match'", ",", "err", "=", "err", ")" ]
python
Prompts a user for input. This is a convenience function that can be used to prompt a user for input later. If the user aborts the input by sending a interrupt signal, this function will catch it and raise a :exc:`Abort` exception. .. versionadded:: 6.0 Added unicode support for cmd.exe on Windows. .. versionadded:: 4.0 Added the `err` parameter. :param text: the text to show for the prompt. :param default: the default value to use if no input happens. If this is not given it will prompt until it's aborted. :param hide_input: if this is set to true then the input value will be hidden. :param confirmation_prompt: asks for confirmation for the value. :param type: the type to use to check the value against. :param value_proc: if this parameter is provided it's a function that is invoked instead of the type conversion to convert a value. :param prompt_suffix: a suffix that should be added to the prompt. :param show_default: shows or hides the default value in the prompt. :param err: if set to true the file defaults to ``stderr`` instead of ``stdout``, the same as with echo.
true
2,645,494
def echo_via_pager(text, color=None):
    """This function takes a text and shows it via an environment specific
    pager on stdout.

    .. versionchanged:: 3.0
       Added the `color` flag.

    :param text: the text to page.
    :param color: controls if the pager supports ANSI colors or not.  The
                  default is autodetection.
    """
    color = resolve_color_default(color)
    if not isinstance(text, string_types):
        text = text_type(text)
    from ._termui_impl import pager
    return pager(text + '\n', color)
[ "def", "echo_via_pager", "(", "text", ",", "color", "=", "None", ")", ":", "color", "=", "resolve_color_default", "(", "color", ")", "if", "not", "isinstance", "(", "text", ",", "string_types", ")", ":", "text", "=", "text_type", "(", "text", ")", "from", ".", "_termui_impl", "import", "pager", "return", "pager", "(", "text", "+", "'\\n'", ",", "color", ")" ]
python
This function takes a text and shows it via an environment specific pager on stdout. .. versionchanged:: 3.0 Added the `color` flag. :param text: the text to page. :param color: controls if the pager supports ANSI colors or not. The default is autodetection.
true
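A short usage sketch through click's public wrapper, which this function implements:

import click

click.echo_via_pager('\n'.join('line %d' % n for n in range(200)))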
2,646,788
def _filter_names(names):
    """
    Given a list of file names, return those names that should be copied.
    """
    names = [n for n in names
             if n not in EXCLUDE_NAMES]
    # This is needed when building a distro from a working
    # copy (likely a checkout) rather than a pristine export:
    for pattern in EXCLUDE_PATTERNS:
        names = [n for n in names
                 if (not fnmatch.fnmatch(n, pattern))
                 and (not n.endswith('.py'))]
    return names
[ "def", "_filter_names", "(", "names", ")", ":", "names", "=", "[", "n", "for", "n", "in", "names", "if", "n", "not", "in", "EXCLUDE_NAMES", "]", "for", "pattern", "in", "EXCLUDE_PATTERNS", ":", "names", "=", "[", "n", "for", "n", "in", "names", "if", "(", "not", "fnmatch", ".", "fnmatch", "(", "n", ",", "pattern", ")", ")", "and", "(", "not", "n", ".", "endswith", "(", "'.py'", ")", ")", "]", "return", "names" ]
python
Given a list of file names, return those names that should be copied.
true
2,646,789
def relative_to(base, relativee):
    """
    Gets 'relativee' relative to 'basepath'.

    i.e.,

    >>> relative_to('/home/', '/home/radix/')
    'radix'
    >>> relative_to('.', '/home/radix/Projects/Twisted')  # curdir is /home/radix
    'Projects/Twisted'

    The 'relativee' must be a child of 'basepath'.
    """
    basepath = os.path.abspath(base)
    relativee = os.path.abspath(relativee)
    if relativee.startswith(basepath):
        relative = relativee[len(basepath):]
        if relative.startswith(os.sep):
            relative = relative[1:]
        return os.path.join(base, relative)
    raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
[ "def", "relative_to", "(", "base", ",", "relativee", ")", ":", "basepath", "=", "os", ".", "path", ".", "abspath", "(", "base", ")", "relativee", "=", "os", ".", "path", ".", "abspath", "(", "relativee", ")", "if", "relativee", ".", "startswith", "(", "basepath", ")", ":", "relative", "=", "relativee", "[", "len", "(", "basepath", ")", ":", "]", "if", "relative", ".", "startswith", "(", "os", ".", "sep", ")", ":", "relative", "=", "relative", "[", "1", ":", "]", "return", "os", ".", "path", ".", "join", "(", "base", ",", "relative", ")", "raise", "ValueError", "(", "\"%s is not a subpath of %s\"", "%", "(", "relativee", ",", "basepath", ")", ")" ]
python
Gets 'relativee' relative to 'basepath'. i.e., >>> relative_to('/home/', '/home/radix/') 'radix' >>> relative_to('.', '/home/radix/Projects/Twisted') # curdir is /home/radix 'Projects/Twisted' The 'relativee' must be a child of 'basepath'.
true
2,646,790
def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):
    """
    Get all packages which are under dname. This is necessary for
    Python 2.2's distutils. Pretty similar arguments to getDataFiles,
    including 'parent'.
    """
    parent = parent or ""
    prefix = []
    if parent:
        prefix = [parent]
    bname = os.path.basename(dname)
    ignore = ignore or []
    if bname in ignore:
        return []
    if results is None:
        results = []
    if pkgname is None:
        pkgname = []
    subfiles = os.listdir(dname)
    abssubfiles = [os.path.join(dname, x) for x in subfiles]
    if '__init__.py' in subfiles:
        results.append(prefix + pkgname + [bname])
        for subdir in filter(os.path.isdir, abssubfiles):
            get_packages(subdir, pkgname=pkgname + [bname],
                         results=results, ignore=ignore,
                         parent=parent)
    res = ['.'.join(result) for result in results]
    return res
[ "def", "get_packages", "(", "dname", ",", "pkgname", "=", "None", ",", "results", "=", "None", ",", "ignore", "=", "None", ",", "parent", "=", "None", ")", ":", "parent", "=", "parent", "or", "\"\"", "prefix", "=", "[", "]", "if", "parent", ":", "prefix", "=", "[", "parent", "]", "bname", "=", "os", ".", "path", ".", "basename", "(", "dname", ")", "ignore", "=", "ignore", "or", "[", "]", "if", "bname", "in", "ignore", ":", "return", "[", "]", "if", "results", "is", "None", ":", "results", "=", "[", "]", "if", "pkgname", "is", "None", ":", "pkgname", "=", "[", "]", "subfiles", "=", "os", ".", "listdir", "(", "dname", ")", "abssubfiles", "=", "[", "os", ".", "path", ".", "join", "(", "dname", ",", "x", ")", "for", "x", "in", "subfiles", "]", "if", "'__init__.py'", "in", "subfiles", ":", "results", ".", "append", "(", "prefix", "+", "pkgname", "+", "[", "bname", "]", ")", "for", "subdir", "in", "filter", "(", "os", ".", "path", ".", "isdir", ",", "abssubfiles", ")", ":", "get_packages", "(", "subdir", ",", "pkgname", "=", "pkgname", "+", "[", "bname", "]", ",", "results", "=", "results", ",", "ignore", "=", "ignore", ",", "parent", "=", "parent", ")", "res", "=", "[", "'.'", ".", "join", "(", "result", ")", "for", "result", "in", "results", "]", "return", "res" ]
python
Get all packages which are under dname. This is necessary for Python 2.2's distutils. Pretty similar arguments to getDataFiles, including 'parent'.
true
2,648,162
def get_random_integer(N, randfunc=None):
    """getRandomInteger(N:int, randfunc:callable):long
    Return a random number with at most N bits.

    If randfunc is omitted, then Random.new().read is used.

    This function is for internal use only and may be
    renamed or removed in the future.
    """
    if randfunc is None:
        randfunc = Random.new().read

    S = randfunc(N >> 3)
    odd_bits = N % 8
    if odd_bits != 0:
        char = ord(randfunc(1)) >> (8 - odd_bits)
        S = bchr(char) + S
    value = bytes_to_long(S)
    return value
[ "def", "get_random_integer", "(", "N", ",", "randfunc", "=", "None", ")", ":", "if", "randfunc", "is", "None", ":", "randfunc", "=", "Random", ".", "new", "(", ")", ".", "read", "S", "=", "randfunc", "(", "N", ">>", "3", ")", "odd_bits", "=", "N", "%", "8", "if", "odd_bits", "!=", "0", ":", "char", "=", "ord", "(", "randfunc", "(", "1", ")", ")", ">>", "(", "8", "-", "odd_bits", ")", "S", "=", "bchr", "(", "char", ")", "+", "S", "value", "=", "bytes_to_long", "(", "S", ")", "return", "value" ]
python
getRandomInteger(N:int, randfunc:callable):long Return a random number with at most N bits. If randfunc is omitted, then Random.new().read is used. This function is for internal use only and may be renamed or removed in the future.
true
2,653,044
def unpad(padded_data, block_size, style='pkcs7'):
    """Remove standard padding.

    :Parameters:
      padded_data : byte string
        A piece of data with padding that needs to be stripped.
      block_size : integer
        The block boundary to use for padding. The input length
        must be a multiple of ``block_size``.
      style : string
        Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*.
    :Return:
        Data without padding.
    :Raises ValueError:
        if the padding is incorrect.
    """
    pdata_len = len(padded_data)
    if pdata_len % block_size:
        raise ValueError("Input data is not padded")
    if style in ('pkcs7', 'x923'):
        padding_len = bord(padded_data[-1])
        if padding_len < 1 or padding_len > min(block_size, pdata_len):
            raise ValueError("Padding is incorrect.")
        if style == 'pkcs7':
            if padded_data[-padding_len:] != bchr(padding_len) * padding_len:
                raise ValueError("PKCS#7 padding is incorrect.")
        else:
            if padded_data[-padding_len:-1] != bchr(0) * (padding_len - 1):
                raise ValueError("ANSI X.923 padding is incorrect.")
    elif style == 'iso7816':
        padding_len = pdata_len - padded_data.rfind(bchr(128))
        if padding_len < 1 or padding_len > min(block_size, pdata_len):
            raise ValueError("Padding is incorrect.")
        if padding_len > 1 and padded_data[1 - padding_len:] != bchr(0) * (padding_len - 1):
            raise ValueError("ISO 7816-4 padding is incorrect.")
    else:
        raise ValueError("Unknown padding style")
    return padded_data[:-padding_len]
[ "def", "unpad", "(", "padded_data", ",", "block_size", ",", "style", "=", "'pkcs7'", ")", ":", "pdata_len", "=", "len", "(", "padded_data", ")", "if", "pdata_len", "%", "block_size", ":", "raise", "ValueError", "(", "\"Input data is not padded\"", ")", "if", "style", "in", "(", "'pkcs7'", ",", "'x923'", ")", ":", "padding_len", "=", "bord", "(", "padded_data", "[", "-", "1", "]", ")", "if", "padding_len", "<", "1", "or", "padding_len", ">", "min", "(", "block_size", ",", "pdata_len", ")", ":", "raise", "ValueError", "(", "\"Padding is incorrect.\"", ")", "if", "style", "==", "'pkcs7'", ":", "if", "padded_data", "[", "-", "padding_len", ":", "]", "!=", "bchr", "(", "padding_len", ")", "*", "padding_len", ":", "raise", "ValueError", "(", "\"PKCS#7 padding is incorrect.\"", ")", "else", ":", "if", "padded_data", "[", "-", "padding_len", ":", "-", "1", "]", "!=", "bchr", "(", "0", ")", "*", "(", "padding_len", "-", "1", ")", ":", "raise", "ValueError", "(", "\"ANSI X.923 padding is incorrect.\"", ")", "elif", "style", "==", "'iso7816'", ":", "padding_len", "=", "pdata_len", "-", "padded_data", ".", "rfind", "(", "bchr", "(", "128", ")", ")", "if", "padding_len", "<", "1", "or", "padding_len", ">", "min", "(", "block_size", ",", "pdata_len", ")", ":", "raise", "ValueError", "(", "\"Padding is incorrect.\"", ")", "if", "padding_len", ">", "1", "and", "padded_data", "[", "1", "-", "padding_len", ":", "]", "!=", "bchr", "(", "0", ")", "*", "(", "padding_len", "-", "1", ")", ":", "raise", "ValueError", "(", "\"ISO 7816-4 padding is incorrect.\"", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown padding style\"", ")", "return", "padded_data", "[", ":", "-", "padding_len", "]" ]
python
Remove standard padding. :Parameters: padded_data : byte string A piece of data with padding that needs to be stripped. block_size : integer The block boundary to use for padding. The input length must be a multiple of ``block_size``. style : string Padding algorithm. It can be *'pkcs7'* (default), *'iso7816'* or *'x923'*. :Return: Data without padding. :Raises ValueError: if the padding is incorrect.
true
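A brief round-trip sketch, assuming the PyCryptodome packaging of this helper alongside its matching pad in Crypto.Util.Padding:

from Crypto.Util.Padding import pad, unpad

padded = pad(b'hello', 16)            # PKCS#7 pad up to a 16-byte boundary
assert unpad(padded, 16) == b'hello'  # corrupt or missing padding raises ValueError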
2,654,422
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.

    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    try:
        return [f[:-3] for f in os.listdir(command_dir)
                if not f.startswith('_') and f.endswith('.py')]
    except OSError:
        return []
[ "def", "find_commands", "(", "management_dir", ")", ":", "command_dir", "=", "os", ".", "path", ".", "join", "(", "management_dir", ",", "'commands'", ")", "try", ":", "return", "[", "f", "[", ":", "-", "3", "]", "for", "f", "in", "os", ".", "listdir", "(", "command_dir", ")", "if", "not", "f", ".", "startswith", "(", "'_'", ")", "and", "f", ".", "endswith", "(", "'.py'", ")", "]", "except", "OSError", ":", "return", "[", "]" ]
python
Given a path to a management directory, returns a list of all the command names that are available. Returns an empty list if no commands are defined.
true
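A hedged example of calling find_commands; the directory layout below is hypothetical:

import os

management_dir = os.path.join('myapp', 'management')  # hypothetical Django app layout
print(find_commands(management_dir))  # e.g. ['seed_db', 'rebuild_index'], or [] if none exist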
2,654,575
def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    if n:
        for i in xrange(0, len(l), n):
            yield l[i:i + n]
[ "def", "chunks", "(", "l", ",", "n", ")", ":", "if", "n", ":", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "l", ")", ",", "n", ")", ":", "yield", "l", "[", "i", ":", "i", "+", "n", "]" ]
python
Yield successive n-sized chunks from l.
true
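A quick illustration of the generator above (xrange makes this Python 2; range behaves the same here):

data = [1, 2, 3, 4, 5]
print(list(chunks(data, 2)))  # [[1, 2], [3, 4], [5]]
print(list(chunks(data, 0)))  # []  (a falsy n yields nothing)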
2,658,324
def paginate(context, window=DEFAULT_WINDOW):
    """
    Renders the ``pagination/pagination.html`` template, resulting in a
    Digg-like display of the available pages, given the current page.  If there
    are too many pages to be displayed before and after the current page, then
    elipses will be used to indicate the undisplayed gap between page numbers.

    Requires one argument, ``context``, which should be a dictionary-like data
    structure and must contain the following keys:

    ``paginator``
        A ``Paginator`` or ``QuerySetPaginator`` object.

    ``page_obj``
        This should be the result of calling the page method on the
        aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given
        the current page.

    This same ``context`` dictionary-like data structure may also include:

    ``getvars``
        A dictionary of all of the **GET** parameters in the current request.
        This is useful to maintain certain types of state, even when requesting
        a different page.
    """
    try:
        paginator = context['paginator']
        page_obj = context['page_obj']
        page_range = paginator.page_range
        # First and last are simply the first *n* pages and the last *n* pages,
        # where *n* is the current window size.
        first = set(list(page_range)[:window])
        last = set(list(page_range)[-window:])
        # Now we look around our current page, making sure that we don't wrap
        # around.
        current_start = page_obj.number - 1 - window
        if current_start < 0:
            current_start = 0
        current_end = page_obj.number - 1 + window
        if current_end < 0:
            current_end = 0
        current = set(list(page_range)[current_start:current_end])
        pages = []
        # If there's no overlap between the first set of pages and the current
        # set of pages, then there's a possible need for elusion.
        if len(first.intersection(current)) == 0:
            first_list = list(first)
            first_list.sort()
            second_list = list(current)
            second_list.sort()
            pages.extend(first_list)
            diff = second_list[0] - first_list[-1]
            # If there is a gap of two, between the last page of the first
            # set and the first page of the current set, then we're missing a
            # page.
            if diff == 2:
                pages.append(second_list[0] - 1)
            # If the difference is just one, then there's nothing to be done,
            # as the pages need no elusion and are correct.
            elif diff == 1:
                pass
            # Otherwise, there's a bigger gap which needs to be signaled for
            # elusion, by pushing a None value to the page list.
            else:
                pages.append(None)
            pages.extend(second_list)
        else:
            unioned = list(first.union(current))
            unioned.sort()
            pages.extend(unioned)
        # If there's no overlap between the current set of pages and the last
        # set of pages, then there's a possible need for elusion.
        if len(current.intersection(last)) == 0:
            second_list = list(last)
            second_list.sort()
            diff = second_list[0] - pages[-1]
            # If there is a gap of two, between the last page of the current
            # set and the first page of the last set, then we're missing a
            # page.
            if diff == 2:
                pages.append(second_list[0] - 1)
            # If the difference is just one, then there's nothing to be done,
            # as the pages need no elusion and are correct.
            elif diff == 1:
                pass
            # Otherwise, there's a bigger gap which needs to be signaled for
            # elusion, by pushing a None value to the page list.
            else:
                pages.append(None)
            pages.extend(second_list)
        else:
            differenced = list(last.difference(current))
            differenced.sort()
            pages.extend(differenced)
        to_return = {
            'pages': pages,
            'page_obj': page_obj,
            'paginator': paginator,
            'is_paginated': paginator.count > paginator.per_page,
        }
        if 'request' in context:
            getvars = context['request'].GET.copy()
            if 'page' in getvars:
                del getvars['page']
            if len(getvars.keys()) > 0:
                to_return['getvars'] = "&%s" % getvars.urlencode()
            else:
                to_return['getvars'] = ''
        return to_return
    except KeyError, AttributeError:
        return {}
[ "def", "paginate", "(", "context", ",", "window", "=", "DEFAULT_WINDOW", ")", ":", "try", ":", "paginator", "=", "context", "[", "'paginator'", "]", "page_obj", "=", "context", "[", "'page_obj'", "]", "page_range", "=", "paginator", ".", "page_range", "first", "=", "set", "(", "list", "(", "page_range", ")", "[", ":", "window", "]", ")", "last", "=", "set", "(", "list", "(", "page_range", ")", "[", "-", "window", ":", "]", ")", "current_start", "=", "page_obj", ".", "number", "-", "1", "-", "window", "if", "current_start", "<", "0", ":", "current_start", "=", "0", "current_end", "=", "page_obj", ".", "number", "-", "1", "+", "window", "if", "current_end", "<", "0", ":", "current_end", "=", "0", "current", "=", "set", "(", "list", "(", "page_range", ")", "[", "current_start", ":", "current_end", "]", ")", "pages", "=", "[", "]", "if", "len", "(", "first", ".", "intersection", "(", "current", ")", ")", "==", "0", ":", "first_list", "=", "list", "(", "first", ")", "first_list", ".", "sort", "(", ")", "second_list", "=", "list", "(", "current", ")", "second_list", ".", "sort", "(", ")", "pages", ".", "extend", "(", "first_list", ")", "diff", "=", "second_list", "[", "0", "]", "-", "first_list", "[", "-", "1", "]", "if", "diff", "==", "2", ":", "pages", ".", "append", "(", "second_list", "[", "0", "]", "-", "1", ")", "elif", "diff", "==", "1", ":", "pass", "else", ":", "pages", ".", "append", "(", "None", ")", "pages", ".", "extend", "(", "second_list", ")", "else", ":", "unioned", "=", "list", "(", "first", ".", "union", "(", "current", ")", ")", "unioned", ".", "sort", "(", ")", "pages", ".", "extend", "(", "unioned", ")", "if", "len", "(", "current", ".", "intersection", "(", "last", ")", ")", "==", "0", ":", "second_list", "=", "list", "(", "last", ")", "second_list", ".", "sort", "(", ")", "diff", "=", "second_list", "[", "0", "]", "-", "pages", "[", "-", "1", "]", "if", "diff", "==", "2", ":", "pages", ".", "append", "(", "second_list", "[", "0", "]", "-", "1", ")", "elif", "diff", "==", "1", ":", "pass", "else", ":", "pages", ".", "append", "(", "None", ")", "pages", ".", "extend", "(", "second_list", ")", "else", ":", "differenced", "=", "list", "(", "last", ".", "difference", "(", "current", ")", ")", "differenced", ".", "sort", "(", ")", "pages", ".", "extend", "(", "differenced", ")", "to_return", "=", "{", "'pages'", ":", "pages", ",", "'page_obj'", ":", "page_obj", ",", "'paginator'", ":", "paginator", ",", "'is_paginated'", ":", "paginator", ".", "count", ">", "paginator", ".", "per_page", ",", "}", "if", "'request'", "in", "context", ":", "getvars", "=", "context", "[", "'request'", "]", ".", "GET", ".", "copy", "(", ")", "if", "'page'", "in", "getvars", ":", "del", "getvars", "[", "'page'", "]", "if", "len", "(", "getvars", ".", "keys", "(", ")", ")", ">", "0", ":", "to_return", "[", "'getvars'", "]", "=", "\"&%s\"", "%", "getvars", ".", "urlencode", "(", ")", "else", ":", "to_return", "[", "'getvars'", "]", "=", "''", "return", "to_return", "except", "KeyError", ",", "AttributeError", ":", "return", "{", "}" ]
python
Renders the ``pagination/pagination.html`` template, resulting in a Digg-like display of the available pages, given the current page. If there are too many pages to be displayed before and after the current page, then elipses will be used to indicate the undisplayed gap between page numbers. Requires one argument, ``context``, which should be a dictionary-like data structure and must contain the following keys: ``paginator`` A ``Paginator`` or ``QuerySetPaginator`` object. ``page_obj`` This should be the result of calling the page method on the aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given the current page. This same ``context`` dictionary-like data structure may also include: ``getvars`` A dictionary of all of the **GET** parameters in the current request. This is useful to maintain certain types of state, even when requesting a different page.
true
2,659,394
def dictfetchall(cursor):
    """Returns all rows from a cursor as a dict (rather than a headerless table)

    From Django Documentation: https://docs.djangoproject.com/en/dev/topics/db/sql/
    """
    desc = cursor.description
    return [dict(zip([col[0] for col in desc], row))
            for row in cursor.fetchall()]
[ "def", "dictfetchall", "(", "cursor", ")", ":", "desc", "=", "cursor", ".", "description", "return", "[", "dict", "(", "zip", "(", "[", "col", "[", "0", "]", "for", "col", "in", "desc", "]", ",", "row", ")", ")", "for", "row", "in", "cursor", ".", "fetchall", "(", ")", "]" ]
python
Returns all rows from a cursor as a dict (rather than a headerless table) From Django Documentation: https://docs.djangoproject.com/en/dev/topics/db/sql/
true
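A sketch of the intended call site, assuming a configured Django connection and an existing table:

from django.db import connection

with connection.cursor() as cursor:
    cursor.execute('SELECT id, name FROM auth_group')
    rows = dictfetchall(cursor)  # e.g. [{'id': 1, 'name': 'editors'}, ...]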
2,661,900
def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg."""
    while 1:
        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif state == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise OperationalError(
                "Bad result from poll: %r" % state)
[ "def", "gevent_wait_callback", "(", "conn", ",", "timeout", "=", "None", ")", ":", "while", "1", ":", "state", "=", "conn", ".", "poll", "(", ")", "if", "state", "==", "extensions", ".", "POLL_OK", ":", "break", "elif", "state", "==", "extensions", ".", "POLL_READ", ":", "wait_read", "(", "conn", ".", "fileno", "(", ")", ",", "timeout", "=", "timeout", ")", "elif", "state", "==", "extensions", ".", "POLL_WRITE", ":", "wait_write", "(", "conn", ".", "fileno", "(", ")", ",", "timeout", "=", "timeout", ")", "else", ":", "raise", "OperationalError", "(", "\"Bad result from poll: %r\"", "%", "state", ")" ]
python
A wait callback useful to allow gevent to work with Psycopg.
true
2,663,963
def markdown(value, arg=''):
    """
    Runs Markdown over a given value, optionally using various
    extensions python-markdown supports.

    Derived from django.contrib.markdown, which was deprecated from django.
    ALWAYS CLEAN INPUT BEFORE TRUSTING IT.

    Syntax::

        {{ value|markdown:"extension1_name,extension2_name..." }}

    To enable safe mode, which strips raw HTML and only returns HTML
    generated by actual Markdown syntax, pass "safe" as the first
    extension in the list.

    If the version of Markdown in use does not support extensions,
    they will be silently ignored.
    """
    import warnings
    warnings.warn('The markdown filter has been deprecated',
                  category=DeprecationWarning)
    try:
        import markdown
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError(
                "Error in 'markdown' filter: The Python markdown library isn't installed."
            )
        return force_text(value)
    else:
        markdown_vers = getattr(markdown, "version_info", 0)
        if markdown_vers < (2, 1):
            if settings.DEBUG:
                raise template.TemplateSyntaxError(
                    """
                    Error in 'markdown' filter:
                    Django does not support versions of the Python markdown library < 2.1.
                    """
                )
            return force_text(value)
        else:
            extensions = [e for e in arg.split(",") if e]
            if extensions and extensions[0] == "safe":
                extensions = extensions[1:]
                return mark_safe(markdown.markdown(
                    force_text(value), extensions, safe_mode=True, enable_attributes=False))
            else:
                return mark_safe(markdown.markdown(
                    force_text(value), extensions, safe_mode=False))
[ "def", "markdown", "(", "value", ",", "arg", "=", "''", ")", ":", "import", "warnings", "warnings", ".", "warn", "(", "'The markdown filter has been deprecated'", ",", "category", "=", "DeprecationWarning", ")", "try", ":", "import", "markdown", "except", "ImportError", ":", "if", "settings", ".", "DEBUG", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"Error in 'markdown' filter: The Python markdown library isn't installed.\"", ")", "return", "force_text", "(", "value", ")", "else", ":", "markdown_vers", "=", "getattr", "(", "markdown", ",", "\"version_info\"", ",", "0", ")", "if", "markdown_vers", "<", "(", "2", ",", "1", ")", ":", "if", "settings", ".", "DEBUG", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"\"\"\n Error in 'markdown' filter:\n Django does not support versions of the Python markdown library < 2.1.\n \"\"\"", ")", "return", "force_text", "(", "value", ")", "else", ":", "extensions", "=", "[", "e", "for", "e", "in", "arg", ".", "split", "(", "\",\"", ")", "if", "e", "]", "if", "extensions", "and", "extensions", "[", "0", "]", "==", "\"safe\"", ":", "extensions", "=", "extensions", "[", "1", ":", "]", "return", "mark_safe", "(", "markdown", ".", "markdown", "(", "force_text", "(", "value", ")", ",", "extensions", ",", "safe_mode", "=", "True", ",", "enable_attributes", "=", "False", ")", ")", "else", ":", "return", "mark_safe", "(", "markdown", ".", "markdown", "(", "force_text", "(", "value", ")", ",", "extensions", ",", "safe_mode", "=", "False", ")", ")" ]
python
Runs Markdown over a given value, optionally using various extensions python-markdown supports. Derived from django.contrib.markdown, which was deprecated from django. ALWAYS CLEAN INPUT BEFORE TRUSTING IT. Syntax:: {{ value|markdown:"extension1_name,extension2_name..." }} To enable safe mode, which strips raw HTML and only returns HTML generated by actual Markdown syntax, pass "safe" as the first extension in the list. If the version of Markdown in use does not support extensions, they will be silently ignored.
true
2,665,896
def template_render(template, context=None, request=None):
    """
    Passing Context or RequestContext to Template.render is deprecated in 1.9+,
    see https://github.com/django/django/pull/3883 and
    https://github.com/django/django/blob/1.9rc1/django/template/backends/django.py#L82-L84

    :param template: Template instance
    :param context: dict
    :param request: Request instance
    :return: rendered template as SafeText instance
    """
    if django.VERSION < (1, 8) or isinstance(template, Template):
        if request:
            context = RequestContext(request, context)
        else:
            context = Context(context)
        return template.render(context)
    # backends template, e.g. django.template.backends.django.Template
    else:
        return template.render(context, request=request)
[ "def", "template_render", "(", "template", ",", "context", "=", "None", ",", "request", "=", "None", ")", ":", "if", "django", ".", "VERSION", "<", "(", "1", ",", "8", ")", "or", "isinstance", "(", "template", ",", "Template", ")", ":", "if", "request", ":", "context", "=", "RequestContext", "(", "request", ",", "context", ")", "else", ":", "context", "=", "Context", "(", "context", ")", "return", "template", ".", "render", "(", "context", ")", "else", ":", "return", "template", ".", "render", "(", "context", ",", "request", "=", "request", ")" ]
python
Passing Context or RequestContext to Template.render is deprecated in 1.9+, see https://github.com/django/django/pull/3883 and https://github.com/django/django/blob/1.9rc1/django/template/backends/django.py#L82-L84 :param template: Template instance :param context: dict :param request: Request instance :return: rendered template as SafeText instance
true
2,666,192
def int_to_bin(i):
    """ Integer to two bytes """
    # divide into two parts (bytes)
    i1 = i % 256
    i2 = int(i / 256)
    # make string (little endian)
    return chr(i1) + chr(i2)
[ "def", "int_to_bin", "(", "i", ")", ":", "i1", "=", "i", "%", "256", "i2", "=", "int", "(", "i", "/", "256", ")", "return", "chr", "(", "i1", ")", "+", "chr", "(", "i2", ")" ]
python
Integer to two bytes
true
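A tiny check of the snippet above (Python 2 semantics, where chr() produces byte strings):

assert int_to_bin(1) == '\x01\x00'    # little endian: low byte first
assert int_to_bin(258) == '\x02\x01'  # 258 = 0x0102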
2,666,292
def media_type_matches(lhs, rhs):
    """
    Returns ``True`` if the media type in the first argument <= the media
    type in the second argument. The media types are strings as described
    by the HTTP spec.

    Valid media type strings include:

    'application/json; indent=4'
    'application/json'
    'text/*'
    '*/*'
    """
    lhs = _MediaType(lhs)
    rhs = _MediaType(rhs)
    return lhs.match(rhs)
[ "def", "media_type_matches", "(", "lhs", ",", "rhs", ")", ":", "lhs", "=", "_MediaType", "(", "lhs", ")", "rhs", "=", "_MediaType", "(", "rhs", ")", "return", "lhs", ".", "match", "(", "rhs", ")" ]
python
Returns ``True`` if the media type in the first argument <= the media type in the second argument. The media types are strings as described by the HTTP spec. Valid media type strings include: 'application/json; indent=4' 'application/json' 'text/*' '*/*'
true
2,666,293
def order_by_precedence(media_type_lst):
    """
    Returns a list of sets of media type strings, ordered by precedence.
    Precedence is determined by how specific a media type is:

    3. 'type/subtype; param=val'
    2. 'type/subtype'
    1. 'type/*'
    0. '*/*'
    """
    ret = [set(), set(), set(), set()]
    for media_type in media_type_lst:
        precedence = _MediaType(media_type).precedence
        ret[3 - precedence].add(media_type)
    return [media_types for media_types in ret if media_types]
[ "def", "order_by_precedence", "(", "media_type_lst", ")", ":", "ret", "=", "[", "set", "(", ")", ",", "set", "(", ")", ",", "set", "(", ")", ",", "set", "(", ")", "]", "for", "media_type", "in", "media_type_lst", ":", "precedence", "=", "_MediaType", "(", "media_type", ")", ".", "precedence", "ret", "[", "3", "-", "precedence", "]", ".", "add", "(", "media_type", ")", "return", "[", "media_types", "for", "media_types", "in", "ret", "if", "media_types", "]" ]
python
Returns a list of sets of media type strings, ordered by precedence. Precedence is determined by how specific a media type is: 3. 'type/subtype; param=val' 2. 'type/subtype' 1. 'type/*' 0. '*/*'
true
2,667,228
def parse_html_list(dictionary, prefix=''):
    """
    Used to support list values in HTML forms.
    Supports lists of primitives and/or dictionaries.

    * List of primitives.

    {
        '[0]': 'abc',
        '[1]': 'def',
        '[2]': 'hij'
    }
        -->
    [
        'abc',
        'def',
        'hij'
    ]

    * List of dictionaries.

    {
        '[0]foo': 'abc',
        '[0]bar': 'def',
        '[1]foo': 'hij',
        '[1]bar': 'klm',
    }
        -->
    [
        {'foo': 'abc', 'bar': 'def'},
        {'foo': 'hij', 'bar': 'klm'}
    ]
    """
    ret = {}
    regex = re.compile(r'^%s\[([0-9]+)\](.*)$' % re.escape(prefix))
    for field, value in dictionary.items():
        match = regex.match(field)
        if not match:
            continue
        index, key = match.groups()
        index = int(index)
        if not key:
            ret[index] = value
        elif isinstance(ret.get(index), dict):
            ret[index][key] = value
        else:
            ret[index] = MultiValueDict({key: [value]})
    return [ret[item] for item in sorted(ret.keys())]
[ "def", "parse_html_list", "(", "dictionary", ",", "prefix", "=", "''", ")", ":", "ret", "=", "{", "}", "regex", "=", "re", ".", "compile", "(", "r'^%s\\[([0-9]+)\\](.*)$'", "%", "re", ".", "escape", "(", "prefix", ")", ")", "for", "field", ",", "value", "in", "dictionary", ".", "items", "(", ")", ":", "match", "=", "regex", ".", "match", "(", "field", ")", "if", "not", "match", ":", "continue", "index", ",", "key", "=", "match", ".", "groups", "(", ")", "index", "=", "int", "(", "index", ")", "if", "not", "key", ":", "ret", "[", "index", "]", "=", "value", "elif", "isinstance", "(", "ret", ".", "get", "(", "index", ")", ",", "dict", ")", ":", "ret", "[", "index", "]", "[", "key", "]", "=", "value", "else", ":", "ret", "[", "index", "]", "=", "MultiValueDict", "(", "{", "key", ":", "[", "value", "]", "}", ")", "return", "[", "ret", "[", "item", "]", "for", "item", "in", "sorted", "(", "ret", ".", "keys", "(", ")", ")", "]" ]
python
Used to support list values in HTML forms. Supports lists of primitives and/or dictionaries. * List of primitives. { '[0]': 'abc', '[1]': 'def', '[2]': 'hij' } --> [ 'abc', 'def', 'hij' ] * List of dictionaries. { '[0]foo': 'abc', '[0]bar': 'def', '[1]foo': 'hij', '[1]bar': 'klm', } --> [ {'foo': 'abc', 'bar': 'def'}, {'foo': 'hij', 'bar': 'klm'} ]
true
2,667,539
def constant_time_compare(val1, val2):  # noqa: C901
    """
    **This code was taken from the django 1.4.x codebase along with the test code**

    Returns True if the two strings are equal, False otherwise.

    The time taken is independent of the number of characters that match.
    """
    if len(val1) != len(val2):
        return False
    if isinstance(val1, bytes):
        val1 = val1.decode("ascii")
    if isinstance(val2, bytes):
        val2 = val2.decode("ascii")
    result = 0
    for x, y in zip(val1, val2):
        result |= ord(x) ^ ord(y)
    return result == 0
[ "def", "constant_time_compare", "(", "val1", ",", "val2", ")", ":", "if", "len", "(", "val1", ")", "!=", "len", "(", "val2", ")", ":", "return", "False", "if", "isinstance", "(", "val1", ",", "bytes", ")", ":", "val1", "=", "val1", ".", "decode", "(", "\"ascii\"", ")", "if", "isinstance", "(", "val2", ",", "bytes", ")", ":", "val2", "=", "val2", ".", "decode", "(", "\"ascii\"", ")", "result", "=", "0", "for", "x", ",", "y", "in", "zip", "(", "val1", ",", "val2", ")", ":", "result", "|=", "ord", "(", "x", ")", "^", "ord", "(", "y", ")", "return", "result", "==", "0" ]
python
**This code was taken from the django 1.4.x codebase along with the test code** Returns True if the two strings are equal, False otherwise. The time taken is independent of the number of characters that match.
true
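A minimal check of the behaviour described above:

assert constant_time_compare('secret-token', 'secret-token') is True
assert constant_time_compare('secret-token', 'secret-tokes') is False
assert constant_time_compare(b'abc', 'abc') is True  # bytes are decoded as ASCII first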
2,669,192
def raise_errors_on_nested_writes(method_name, serializer, validated_data):
    """
    Give explicit errors when users attempt to pass writable nested data.

    If we don't do this explicitly they'd get a less helpful error when
    calling `.save()` on the serializer.

    We don't *automatically* support these sorts of nested writes because
    there are too many ambiguities to define a default behavior.

    Eg. Suppose we have a `UserSerializer` with a nested profile. How should
    we handle the case of an update, where the `profile` relationship does
    not exist? Any of the following might be valid:

    * Raise an application error.
    * Silently ignore the nested part of the update.
    * Automatically create a profile instance.
    """
    # Ensure we don't have a writable nested field. For example:
    #
    # class UserSerializer(ModelSerializer):
    #     ...
    #     profile = ProfileSerializer()
    assert not any(
        isinstance(field, BaseSerializer) and
        (key in validated_data) and
        isinstance(validated_data[key], (list, dict))
        for key, field in serializer.fields.items()
    ), (
        'The `.{method_name}()` method does not support writable nested'
        'fields by default.\nWrite an explicit `.{method_name}()` method for '
        'serializer `{module}.{class_name}`, or set `read_only=True` on '
        'nested serializer fields.'.format(
            method_name=method_name,
            module=serializer.__class__.__module__,
            class_name=serializer.__class__.__name__
        )
    )

    # Ensure we don't have a writable dotted-source field. For example:
    #
    # class UserSerializer(ModelSerializer):
    #     ...
    #     address = serializer.CharField('profile.address')
    assert not any(
        '.' in field.source and
        (key in validated_data) and
        isinstance(validated_data[key], (list, dict))
        for key, field in serializer.fields.items()
    ), (
        'The `.{method_name}()` method does not support writable dotted-source '
        'fields by default.\nWrite an explicit `.{method_name}()` method for '
        'serializer `{module}.{class_name}`, or set `read_only=True` on '
        'dotted-source serializer fields.'.format(
            method_name=method_name,
            module=serializer.__class__.__module__,
            class_name=serializer.__class__.__name__
        )
    )
[ "def", "raise_errors_on_nested_writes", "(", "method_name", ",", "serializer", ",", "validated_data", ")", ":", "assert", "not", "any", "(", "isinstance", "(", "field", ",", "BaseSerializer", ")", "and", "(", "key", "in", "validated_data", ")", "and", "isinstance", "(", "validated_data", "[", "key", "]", ",", "(", "list", ",", "dict", ")", ")", "for", "key", ",", "field", "in", "serializer", ".", "fields", ".", "items", "(", ")", ")", ",", "(", "'The `.{method_name}()` method does not support writable nested'", "'fields by default.\\nWrite an explicit `.{method_name}()` method for '", "'serializer `{module}.{class_name}`, or set `read_only=True` on '", "'nested serializer fields.'", ".", "format", "(", "method_name", "=", "method_name", ",", "module", "=", "serializer", ".", "__class__", ".", "__module__", ",", "class_name", "=", "serializer", ".", "__class__", ".", "__name__", ")", ")", "assert", "not", "any", "(", "'.'", "in", "field", ".", "source", "and", "(", "key", "in", "validated_data", ")", "and", "isinstance", "(", "validated_data", "[", "key", "]", ",", "(", "list", ",", "dict", ")", ")", "for", "key", ",", "field", "in", "serializer", ".", "fields", ".", "items", "(", ")", ")", ",", "(", "'The `.{method_name}()` method does not support writable dotted-source '", "'fields by default.\\nWrite an explicit `.{method_name}()` method for '", "'serializer `{module}.{class_name}`, or set `read_only=True` on '", "'dotted-source serializer fields.'", ".", "format", "(", "method_name", "=", "method_name", ",", "module", "=", "serializer", ".", "__class__", ".", "__module__", ",", "class_name", "=", "serializer", ".", "__class__", ".", "__name__", ")", ")" ]
python
Give explicit errors when users attempt to pass writable nested data. If we don't do this explicitly they'd get a less helpful error when calling `.save()` on the serializer. We don't *automatically* support these sorts of nested writes because there are too many ambiguities to define a default behavior. Eg. Suppose we have a `UserSerializer` with a nested profile. How should we handle the case of an update, where the `profile` relationship does not exist? Any of the following might be valid: * Raise an application error. * Silently ignore the nested part of the update. * Automatically create a profile instance.
true
2,669,822
def gridLog(**kw):
    """Send GLRecord, Distributed Logging Utilities
    If the scheme is passed as a keyword parameter
    the value is expected to be a callable function
    that takes 2 parameters: url, outputStr

    GRIDLOG_ON   -- turn grid logging on
    GRIDLOG_DEST -- provide URL destination
    """
    import os
    if not bool(int(os.environ.get('GRIDLOG_ON', 0))):
        return
    url = os.environ.get('GRIDLOG_DEST')
    if url is None:
        return

    ## NOTE: urlparse problem w/customized schemes
    try:
        scheme = url[:url.find('://')]
        send = GLRegistry[scheme]
        send(url, str(GLRecord(**kw)), )
    except Exception, ex:
        print >>sys.stderr, "*** gridLog failed -- %s" % (str(kw))
[ "def", "gridLog", "(", "**", "kw", ")", ":", "import", "os", "if", "not", "bool", "(", "int", "(", "os", ".", "environ", ".", "get", "(", "'GRIDLOG_ON'", ",", "0", ")", ")", ")", ":", "return", "url", "=", "os", ".", "environ", ".", "get", "(", "'GRIDLOG_DEST'", ")", "if", "url", "is", "None", ":", "return", "try", ":", "scheme", "=", "url", "[", ":", "url", ".", "find", "(", "'://'", ")", "]", "send", "=", "GLRegistry", "[", "scheme", "]", "send", "(", "url", ",", "str", "(", "GLRecord", "(", "**", "kw", ")", ")", ",", ")", "except", "Exception", ",", "ex", ":", "print", ">>", "sys", ".", "stderr", ",", "\"*** gridLog failed -- %s\"", "%", "(", "str", "(", "kw", ")", ")" ]
python
Send GLRecord, Distributed Logging Utilities If the scheme is passed as a keyword parameter the value is expected to be a callable function that takes 2 parameters: url, outputStr GRIDLOG_ON -- turn grid logging on GRIDLOG_DEST -- provide URL destination
true
2,670,335
def GetWSAActionFault(operation, name):
    """Find wsa:Action attribute, and return value or WSA.FAULT
       for the default.
    """
    attr = operation.faults[name].action
    if attr is not None:
        return attr
    return WSA.FAULT
[ "def", "GetWSAActionFault", "(", "operation", ",", "name", ")", ":", "attr", "=", "operation", ".", "faults", "[", "name", "]", ".", "action", "if", "attr", "is", "not", "None", ":", "return", "attr", "return", "WSA", ".", "FAULT" ]
python
Find wsa:Action attribute, and return value or WSA.FAULT for the default.
true
2,670,336
def GetWSAActionInput(operation):
    """Find wsa:Action attribute, and return value or the default."""
    attr = operation.input.action
    if attr is not None:
        return attr
    portType = operation.getPortType()
    targetNamespace = portType.getTargetNamespace()
    ptName = portType.name
    msgName = operation.input.name
    if not msgName:
        msgName = operation.name + 'Request'
    if targetNamespace.endswith('/'):
        return '%s%s/%s' % (targetNamespace, ptName, msgName)
    return '%s/%s/%s' % (targetNamespace, ptName, msgName)
[ "def", "GetWSAActionInput", "(", "operation", ")", ":", "attr", "=", "operation", ".", "input", ".", "action", "if", "attr", "is", "not", "None", ":", "return", "attr", "portType", "=", "operation", ".", "getPortType", "(", ")", "targetNamespace", "=", "portType", ".", "getTargetNamespace", "(", ")", "ptName", "=", "portType", ".", "name", "msgName", "=", "operation", ".", "input", ".", "name", "if", "not", "msgName", ":", "msgName", "=", "operation", ".", "name", "+", "'Request'", "if", "targetNamespace", ".", "endswith", "(", "'/'", ")", ":", "return", "'%s%s/%s'", "%", "(", "targetNamespace", ",", "ptName", ",", "msgName", ")", "return", "'%s/%s/%s'", "%", "(", "targetNamespace", ",", "ptName", ",", "msgName", ")" ]
python
Find wsa:Action attribute, and return value or the default.
true
2,670,337
def GetWSAActionOutput(operation):
    """Find wsa:Action attribute, and return value or the default."""
    attr = operation.output.action
    if attr is not None:
        return attr
    targetNamespace = operation.getPortType().getTargetNamespace()
    ptName = operation.getPortType().name
    msgName = operation.output.name
    if not msgName:
        msgName = operation.name + 'Response'
    if targetNamespace.endswith('/'):
        return '%s%s/%s' % (targetNamespace, ptName, msgName)
    return '%s/%s/%s' % (targetNamespace, ptName, msgName)
[ "def", "GetWSAActionOutput", "(", "operation", ")", ":", "attr", "=", "operation", ".", "output", ".", "action", "if", "attr", "is", "not", "None", ":", "return", "attr", "targetNamespace", "=", "operation", ".", "getPortType", "(", ")", ".", "getTargetNamespace", "(", ")", "ptName", "=", "operation", ".", "getPortType", "(", ")", ".", "name", "msgName", "=", "operation", ".", "output", ".", "name", "if", "not", "msgName", ":", "msgName", "=", "operation", ".", "name", "+", "'Response'", "if", "targetNamespace", ".", "endswith", "(", "'/'", ")", ":", "return", "'%s%s/%s'", "%", "(", "targetNamespace", ",", "ptName", ",", "msgName", ")", "return", "'%s/%s/%s'", "%", "(", "targetNamespace", ",", "ptName", ",", "msgName", ")" ]
python
Find wsa:Action attribute, and return value or the default.
true
2,670,429
def assert_bool(dist, attr, value): """Verify that value is True, False, 0, or 1""" if bool(value) != value: raise DistutilsSetupError( "%r must be a boolean value (got %r)" % (attr,value) )
[ "def", "assert_bool", "(", "dist", ",", "attr", ",", "value", ")", ":", "if", "bool", "(", "value", ")", "!=", "value", ":", "raise", "DistutilsSetupError", "(", "\"%r must be a boolean value (got %r)\"", "%", "(", "attr", ",", "value", ")", ")" ]
python
Verify that value is True, False, 0, or 1
true
2,671,789
def is_output_supports_color(): """ Returns True if the running system's terminal supports color, and False otherwise. """ plat = sys.platform supported_platform = plat != 'Pocket PC' and (plat != 'win32' or 'ANSICON' in os.environ) is_a_tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() if not supported_platform or not is_a_tty: return False return True
[ "def", "is_output_supports_color", "(", ")", ":", "plat", "=", "sys", ".", "platform", "supported_platform", "=", "plat", "!=", "'Pocket PC'", "and", "(", "plat", "!=", "'win32'", "or", "'ANSICON'", "in", "os", ".", "environ", ")", "is_a_tty", "=", "hasattr", "(", "sys", ".", "stdout", ",", "'isatty'", ")", "and", "sys", ".", "stdout", ".", "isatty", "(", ")", "if", "not", "supported_platform", "or", "not", "is_a_tty", ":", "return", "False", "return", "True" ]
python
Returns True if the running system's terminal supports color, and False otherwise.
true
2,672,717
def GetSchema(component): """Convenience function for finding the parent XMLSchema instance. """ parent = component while not isinstance(parent, XMLSchema): parent = parent._parent() return parent
[ "def", "GetSchema", "(", "component", ")", ":", "parent", "=", "component", "while", "not", "isinstance", "(", "parent", ",", "XMLSchema", ")", ":", "parent", "=", "parent", ".", "_parent", "(", ")", "return", "parent" ]
python
Convenience function for finding the parent XMLSchema instance.
true
2,673,691
def _resolve_model(obj): """ Resolve supplied `obj` to a Django model class. `obj` must be a Django model class itself, or a string representation of one. Useful in situations like GH #1225 where Django may not have resolved a string-based reference to a model in another model's foreign key definition. String representations should have the format: 'appname.ModelName' """ if isinstance(obj, six.string_types) and len(obj.split('.')) == 2: app_name, model_name = obj.split('.') resolved_model = apps.get_model(app_name, model_name) if resolved_model is None: msg = "Django did not return a model for {0}.{1}" raise ImproperlyConfigured(msg.format(app_name, model_name)) return resolved_model elif inspect.isclass(obj) and issubclass(obj, models.Model): return obj raise ValueError("{0} is not a Django model".format(obj))
[ "def", "_resolve_model", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", "and", "len", "(", "obj", ".", "split", "(", "'.'", ")", ")", "==", "2", ":", "app_name", ",", "model_name", "=", "obj", ".", "split", "(", "'.'", ")", "resolved_model", "=", "apps", ".", "get_model", "(", "app_name", ",", "model_name", ")", "if", "resolved_model", "is", "None", ":", "msg", "=", "\"Django did not return a model for {0}.{1}\"", "raise", "ImproperlyConfigured", "(", "msg", ".", "format", "(", "app_name", ",", "model_name", ")", ")", "return", "resolved_model", "elif", "inspect", ".", "isclass", "(", "obj", ")", "and", "issubclass", "(", "obj", ",", "models", ".", "Model", ")", ":", "return", "obj", "raise", "ValueError", "(", "\"{0} is not a Django model\"", ".", "format", "(", "obj", ")", ")" ]
python
Resolve supplied `obj` to a Django model class. `obj` must be a Django model class itself, or a string representation of one. Useful in situations like GH #1225 where Django may not have resolved a string-based reference to a model in another model's foreign key definition. String representations should have the format: 'appname.ModelName'
true
2,673,692
def get_field_info(model): """ Given a model class, returns a `FieldInfo` instance, which is a `namedtuple`, containing metadata about the various field types on the model including information about their relationships. """ opts = model._meta.concrete_model._meta pk = _get_pk(opts) fields = _get_fields(opts) forward_relations = _get_forward_relationships(opts) reverse_relations = _get_reverse_relationships(opts) fields_and_pk = _merge_fields_and_pk(pk, fields) relationships = _merge_relationships(forward_relations, reverse_relations) return FieldInfo(pk, fields, forward_relations, reverse_relations, fields_and_pk, relationships)
[ "def", "get_field_info", "(", "model", ")", ":", "opts", "=", "model", ".", "_meta", ".", "concrete_model", ".", "_meta", "pk", "=", "_get_pk", "(", "opts", ")", "fields", "=", "_get_fields", "(", "opts", ")", "forward_relations", "=", "_get_forward_relationships", "(", "opts", ")", "reverse_relations", "=", "_get_reverse_relationships", "(", "opts", ")", "fields_and_pk", "=", "_merge_fields_and_pk", "(", "pk", ",", "fields", ")", "relationships", "=", "_merge_relationships", "(", "forward_relations", ",", "reverse_relations", ")", "return", "FieldInfo", "(", "pk", ",", "fields", ",", "forward_relations", ",", "reverse_relations", ",", "fields_and_pk", ",", "relationships", ")" ]
python
Given a model class, returns a `FieldInfo` instance, which is a `namedtuple`, containing metadata about the various field types on the model including information about their relationships.
true
2,673,699
def is_abstract_model(model): """ Given a model class, returns a boolean True if it is abstract and False if it is not. """ return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
[ "def", "is_abstract_model", "(", "model", ")", ":", "return", "hasattr", "(", "model", ",", "'_meta'", ")", "and", "hasattr", "(", "model", ".", "_meta", ",", "'abstract'", ")", "and", "model", ".", "_meta", ".", "abstract" ]
python
Given a model class, returns a boolean True if it is abstract and False if it is not.
true
2,674,367
def is_simple_callable(obj): """ True if the object is a callable that takes no arguments. """ function = inspect.isfunction(obj) method = inspect.ismethod(obj) if not (function or method): return False args, _, _, defaults = inspect.getargspec(obj) len_args = len(args) if function else len(args) - 1 len_defaults = len(defaults) if defaults else 0 return len_args <= len_defaults
[ "def", "is_simple_callable", "(", "obj", ")", ":", "function", "=", "inspect", ".", "isfunction", "(", "obj", ")", "method", "=", "inspect", ".", "ismethod", "(", "obj", ")", "if", "not", "(", "function", "or", "method", ")", ":", "return", "False", "args", ",", "_", ",", "_", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "obj", ")", "len_args", "=", "len", "(", "args", ")", "if", "function", "else", "len", "(", "args", ")", "-", "1", "len_defaults", "=", "len", "(", "defaults", ")", "if", "defaults", "else", "0", "return", "len_args", "<=", "len_defaults" ]
python
True if the object is a callable that takes no arguments.
true
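A small usage sketch, assuming the is_simple_callable function from the snippet above is in scope (it relies on inspect.getargspec, so it targets older Python versions):

def no_args():
    return 1

def needs_arg(x):
    return x

def all_defaulted(x=1, y=2):
    return x + y

# Only callables whose positional arguments are all optional count as "simple".
print(is_simple_callable(no_args))        # True
print(is_simple_callable(needs_arg))      # False
print(is_simple_callable(all_defaulted))  # True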
2,674,369
def set_value(dictionary, keys, value): """ Similar to Python's built in `dictionary[key] = value`, but takes a list of nested keys instead of a single key. set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2} set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2} set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}} """ if not keys: dictionary.update(value) return for key in keys[:-1]: if key not in dictionary: dictionary[key] = {} dictionary = dictionary[key] dictionary[keys[-1]] = value
[ "def", "set_value", "(", "dictionary", ",", "keys", ",", "value", ")", ":", "if", "not", "keys", ":", "dictionary", ".", "update", "(", "value", ")", "return", "for", "key", "in", "keys", "[", ":", "-", "1", "]", ":", "if", "key", "not", "in", "dictionary", ":", "dictionary", "[", "key", "]", "=", "{", "}", "dictionary", "=", "dictionary", "[", "key", "]", "dictionary", "[", "keys", "[", "-", "1", "]", "]", "=", "value" ]
python
Similar to Python's built in `dictionary[key] = value`, but takes a list of nested keys instead of a single key. set_value({'a': 1}, [], {'b': 2}) -> {'a': 1, 'b': 2} set_value({'a': 1}, ['x'], 2) -> {'a': 1, 'x': 2} set_value({'a': 1}, ['x', 'y'], 2) -> {'a': 1, 'x': {'y': 2}}
true
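A short usage sketch, assuming the set_value function from the snippet above is in scope; nested keys create intermediate dicts as needed.

data = {'a': 1}
set_value(data, [], {'b': 2})   # empty key list merges into the top level
set_value(data, ['x'], 3)       # single key
set_value(data, ['y', 'z'], 4)  # nested keys build {'y': {'z': 4}}
print(data)  # -> {'a': 1, 'b': 2, 'x': 3, 'y': {'z': 4}}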
2,674,370
def to_choices_dict(choices): """ Convert choices into key/value dicts. to_choices_dict([1]) -> {1: 1} to_choices_dict([(1, '1st'), (2, '2nd')]) -> {1: '1st', 2: '2nd'} to_choices_dict([('Group', ((1, '1st'), (2, '2nd')))]) -> {'Group': {1: '1st', 2: '2nd'}} """ # Allow single, paired or grouped choices style: # choices = [1, 2, 3] # choices = [(1, 'First'), (2, 'Second'), (3, 'Third')] # choices = [('Category', ((1, 'First'), (2, 'Second'))), (3, 'Third')] ret = OrderedDict() for choice in choices: if (not isinstance(choice, (list, tuple))): # single choice ret[choice] = choice else: key, value = choice if isinstance(value, (list, tuple)): # grouped choices (category, sub choices) ret[key] = to_choices_dict(value) else: # paired choice (key, display value) ret[key] = value return ret
[ "def", "to_choices_dict", "(", "choices", ")", ":", "ret", "=", "OrderedDict", "(", ")", "for", "choice", "in", "choices", ":", "if", "(", "not", "isinstance", "(", "choice", ",", "(", "list", ",", "tuple", ")", ")", ")", ":", "ret", "[", "choice", "]", "=", "choice", "else", ":", "key", ",", "value", "=", "choice", "if", "isinstance", "(", "value", ",", "(", "list", ",", "tuple", ")", ")", ":", "ret", "[", "key", "]", "=", "to_choices_dict", "(", "value", ")", "else", ":", "ret", "[", "key", "]", "=", "value", "return", "ret" ]
python
Convert choices into key/value dicts. to_choices_dict([1]) -> {1: 1} to_choices_dict([(1, '1st'), (2, '2nd')]) -> {1: '1st', 2: '2nd'} to_choices_dict([('Group', ((1, '1st'), (2, '2nd')))]) -> {'Group': {1: '1st', 2: '2nd'}}
true
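A usage sketch covering the three accepted choice styles, assuming the to_choices_dict function from the snippet above is in scope:

choices = [
    ('Audio', ((1, 'Vinyl'), (2, 'CD'))),  # grouped choices
    (3, 'DVD'),                            # paired choice
    4,                                     # single choice
]
print(to_choices_dict(choices))
# -> OrderedDict([('Audio', OrderedDict([(1, 'Vinyl'), (2, 'CD')])), (3, 'DVD'), (4, 4)])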
2,674,371
def flatten_choices_dict(choices): """ Convert a grouped choices dict into a flat dict of choices. flatten_choices_dict({1: '1st', 2: '2nd'}) -> {1: '1st', 2: '2nd'} flatten_choices_dict({'Group': {1: '1st', 2: '2nd'}}) -> {1: '1st', 2: '2nd'} """ ret = OrderedDict() for key, value in choices.items(): if isinstance(value, dict): # grouped choices (category, sub choices) for sub_key, sub_value in value.items(): ret[sub_key] = sub_value else: # choice (key, display value) ret[key] = value return ret
[ "def", "flatten_choices_dict", "(", "choices", ")", ":", "ret", "=", "OrderedDict", "(", ")", "for", "key", ",", "value", "in", "choices", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "for", "sub_key", ",", "sub_value", "in", "value", ".", "items", "(", ")", ":", "ret", "[", "sub_key", "]", "=", "sub_value", "else", ":", "ret", "[", "key", "]", "=", "value", "return", "ret" ]
python
Convert a grouped choices dict into a flat dict of choices. flatten_choices_dict({1: '1st', 2: '2nd'}) -> {1: '1st', 2: '2nd'} flatten_choices_dict({'Group': {1: '1st', 2: '2nd'}}) -> {1: '1st', 2: '2nd'}
true
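A companion sketch, assuming both helpers from the snippets above are in scope, showing how a grouped result flattens back to a single level:

grouped = to_choices_dict([('Audio', ((1, 'Vinyl'), (2, 'CD'))), (3, 'DVD')])
print(flatten_choices_dict(grouped))
# -> OrderedDict([(1, 'Vinyl'), (2, 'CD'), (3, 'DVD')])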
2,674,688
def fromXMLname(string): """Convert XML name to unicode string.""" retval = sub(r'_xFFFF_','', string ) def fun( matchobj ): return _fromUnicodeHex( matchobj.group(0) ) retval = sub(r'_x[0-9A-Za-z]+_', fun, retval ) return retval
[ "def", "fromXMLname", "(", "string", ")", ":", "retval", "=", "sub", "(", "r'_xFFFF_'", ",", "''", ",", "string", ")", "def", "fun", "(", "matchobj", ")", ":", "return", "_fromUnicodeHex", "(", "matchobj", ".", "group", "(", "0", ")", ")", "retval", "=", "sub", "(", "r'_x[0-9A-Za-z]+_'", ",", "fun", ",", "retval", ")", "return", "retval" ]
python
Convert XML name to unicode string.
true
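The _fromUnicodeHex helper is not shown in the snippet; the following self-contained sketch uses a stand-in decoder to illustrate the same _xHHHH_ escape convention:

import re

def demo_from_xml_name(name):
    # Stand-in for _fromUnicodeHex: turn each _xHHHH_ escape back into its character.
    name = name.replace('_xFFFF_', '')
    return re.sub(r'_x[0-9A-Fa-f]{4}_',
                  lambda m: chr(int(m.group(0)[2:-1], 16)),
                  name)

print(demo_from_xml_name('weight_x002D_kg'))  # -> weight-kg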
2,676,730
def needs_label(model_field, field_name): """ Returns `True` if the label based on the model's verbose name is not equal to the default label it would have based on its field name. """ default_label = field_name.replace('_', ' ').capitalize() return capfirst(model_field.verbose_name) != default_label
[ "def", "needs_label", "(", "model_field", ",", "field_name", ")", ":", "default_label", "=", "field_name", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "capitalize", "(", ")", "return", "capfirst", "(", "model_field", ".", "verbose_name", ")", "!=", "default_label" ]
python
Returns `True` if the label based on the model's verbose name is not equal to the default label it would have based on its field name.
true
2,676,940
def remove_trailing_string(content, trailing): """ Strip trailing component `trailing` from `content` if it exists. Used when generating names from view classes. """ if content.endswith(trailing) and content != trailing: return content[:-len(trailing)] return content
[ "def", "remove_trailing_string", "(", "content", ",", "trailing", ")", ":", "if", "content", ".", "endswith", "(", "trailing", ")", "and", "content", "!=", "trailing", ":", "return", "content", "[", ":", "-", "len", "(", "trailing", ")", "]", "return", "content" ]
python
Strip trailing component `trailing` from `content` if it exists. Used when generating names from view classes.
true
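A tiny usage sketch, assuming the remove_trailing_string function from the snippet above is in scope; note that an exact match is deliberately left untouched.

print(remove_trailing_string('UserViewSet', 'ViewSet'))  # -> 'User'
print(remove_trailing_string('ViewSet', 'ViewSet'))      # -> 'ViewSet' (content equals trailing, so unchanged)
print(remove_trailing_string('UserSerializer', 'View'))  # -> 'UserSerializer' (no trailing match)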
2,676,943
def markup_description(description): """ Apply HTML markup to the given description. """ if apply_markdown: description = apply_markdown(description) else: description = escape(description).replace('\n', '<br />') description = '<p>' + description + '</p>' return mark_safe(description)
[ "def", "markup_description", "(", "description", ")", ":", "if", "apply_markdown", ":", "description", "=", "apply_markdown", "(", "description", ")", "else", ":", "description", "=", "escape", "(", "description", ")", ".", "replace", "(", "'\\n'", ",", "'<br />'", ")", "description", "=", "'<p>'", "+", "description", "+", "'</p>'", "return", "mark_safe", "(", "description", ")" ]
python
Apply HTML markup to the given description.
true
2,677,488
def pointInsidePolygon(x, y, poly): """ Determine if a point is inside a given polygon or not Polygon is a list of (x,y) pairs. [code taken from: http://www.ariel.com.au/a/python-point-int-poly.html] let's make an easy square: >>> poly = [ (0,0),\ (1,0),\ (1,1),\ (0,1) ] >>> pointInsidePolygon(0.5,0.5, poly) True >>> pointInsidePolygon(1.5,1.5, poly) False """ n = len(poly) inside = False p1x, p1y = poly[0] for i in range(n + 1): p2x, p2y = poly[i % n] if y > min(p1y, p2y): if y <= max(p1y, p2y): if x <= max(p1x, p2x): if p1y != p2y: xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x if p1x == p2x or x <= xinters: inside = not inside p1x, p1y = p2x, p2y return inside
[ "def", "pointInsidePolygon", "(", "x", ",", "y", ",", "poly", ")", ":", "n", "=", "len", "(", "poly", ")", "inside", "=", "False", "p1x", ",", "p1y", "=", "poly", "[", "0", "]", "for", "i", "in", "range", "(", "n", "+", "1", ")", ":", "p2x", ",", "p2y", "=", "poly", "[", "i", "%", "n", "]", "if", "y", ">", "min", "(", "p1y", ",", "p2y", ")", ":", "if", "y", "<=", "max", "(", "p1y", ",", "p2y", ")", ":", "if", "x", "<=", "max", "(", "p1x", ",", "p2x", ")", ":", "if", "p1y", "!=", "p2y", ":", "xinters", "=", "(", "y", "-", "p1y", ")", "*", "(", "p2x", "-", "p1x", ")", "/", "(", "p2y", "-", "p1y", ")", "+", "p1x", "if", "p1x", "==", "p2x", "or", "x", "<=", "xinters", ":", "inside", "=", "not", "inside", "p1x", ",", "p1y", "=", "p2x", ",", "p2y", "return", "inside" ]
python
Determine if a point is inside a given polygon or not Polygon is a list of (x,y) pairs. [code taken from: http://www.ariel.com.au/a/python-point-int-poly.html] let's make an easy square: >>> poly = [ (0,0),\ (1,0),\ (1,1),\ (0,1) ] >>> pointInsidePolygon(0.5,0.5, poly) True >>> pointInsidePolygon(1.5,1.5, poly) False
true
2,677,857
def preserve_builtin_query_params(url, request=None): """ Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters. """ if request is None: return url overrides = [ api_settings.URL_FORMAT_OVERRIDE, ] for param in overrides: if param and (param in request.GET): value = request.GET[param] url = replace_query_param(url, param, value) return url
[ "def", "preserve_builtin_query_params", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "return", "url", "overrides", "=", "[", "api_settings", ".", "URL_FORMAT_OVERRIDE", ",", "]", "for", "param", "in", "overrides", ":", "if", "param", "and", "(", "param", "in", "request", ".", "GET", ")", ":", "value", "=", "request", ".", "GET", "[", "param", "]", "url", "=", "replace_query_param", "(", "url", ",", "param", ",", "value", ")", "return", "url" ]
python
Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters.
true
2,677,993
def _inclusiveNamespacePrefixes(node, context, unsuppressedPrefixes): '''http://www.w3.org/TR/xml-exc-c14n/ InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that are handled in the manner described by the Canonical XML Recommendation''' inclusive = [] if node.prefix: usedPrefixes = ['xmlns:%s' %node.prefix] else: usedPrefixes = ['xmlns'] for a in _attrs(node): if a.nodeName.startswith('xmlns') or not a.prefix: continue usedPrefixes.append('xmlns:%s' %a.prefix) unused_namespace_dict = {} for attr in context: n = attr.nodeName if n in unsuppressedPrefixes: inclusive.append(attr) elif n.startswith('xmlns:') and n[6:] in unsuppressedPrefixes: inclusive.append(attr) elif n.startswith('xmlns') and n[5:] in unsuppressedPrefixes: inclusive.append(attr) elif attr.nodeName in usedPrefixes: inclusive.append(attr) elif n.startswith('xmlns:'): unused_namespace_dict[n] = attr.value return inclusive, unused_namespace_dict
[ "def", "_inclusiveNamespacePrefixes", "(", "node", ",", "context", ",", "unsuppressedPrefixes", ")", ":", "inclusive", "=", "[", "]", "if", "node", ".", "prefix", ":", "usedPrefixes", "=", "[", "'xmlns:%s'", "%", "node", ".", "prefix", "]", "else", ":", "usedPrefixes", "=", "[", "'xmlns'", "]", "for", "a", "in", "_attrs", "(", "node", ")", ":", "if", "a", ".", "nodeName", ".", "startswith", "(", "'xmlns'", ")", "or", "not", "a", ".", "prefix", ":", "continue", "usedPrefixes", ".", "append", "(", "'xmlns:%s'", "%", "a", ".", "prefix", ")", "unused_namespace_dict", "=", "{", "}", "for", "attr", "in", "context", ":", "n", "=", "attr", ".", "nodeName", "if", "n", "in", "unsuppressedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "n", ".", "startswith", "(", "'xmlns:'", ")", "and", "n", "[", "6", ":", "]", "in", "unsuppressedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "n", ".", "startswith", "(", "'xmlns'", ")", "and", "n", "[", "5", ":", "]", "in", "unsuppressedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "attr", ".", "nodeName", "in", "usedPrefixes", ":", "inclusive", ".", "append", "(", "attr", ")", "elif", "n", ".", "startswith", "(", "'xmlns:'", ")", ":", "unused_namespace_dict", "[", "n", "]", "=", "attr", ".", "value", "return", "inclusive", ",", "unused_namespace_dict" ]
python
http://www.w3.org/TR/xml-exc-c14n/ InclusiveNamespaces PrefixList parameter, which lists namespace prefixes that are handled in the manner described by the Canonical XML Recommendation
true
2,677,994
def Canonicalize(node, output=None, **kw): '''Canonicalize(node, output=None, **kw) -> UTF-8 Canonicalize a DOM document/element node and all descendents. Return the text; if output is specified then output.write will be called to output the text and None will be returned Keyword parameters: nsdict: a dictionary of prefix:uri namespace entries assumed to exist in the surrounding context comments: keep comments if non-zero (default is 0) subset: Canonical XML subsetting resulting from XPath (default is []) unsuppressedPrefixes: do exclusive C14N, and this specifies the prefixes that should be inherited. ''' if output: apply(_implementation, (node, output.write), kw) else: s = StringIO.StringIO() apply(_implementation, (node, s.write), kw) return s.getvalue()
[ "def", "Canonicalize", "(", "node", ",", "output", "=", "None", ",", "**", "kw", ")", ":", "if", "output", ":", "apply", "(", "_implementation", ",", "(", "node", ",", "output", ".", "write", ")", ",", "kw", ")", "else", ":", "s", "=", "StringIO", ".", "StringIO", "(", ")", "apply", "(", "_implementation", ",", "(", "node", ",", "s", ".", "write", ")", ",", "kw", ")", "return", "s", ".", "getvalue", "(", ")" ]
python
Canonicalize(node, output=None, **kw) -> UTF-8 Canonicalize a DOM document/element node and all descendents. Return the text; if output is specified then output.write will be called to output the text and None will be returned Keyword parameters: nsdict: a dictionary of prefix:uri namespace entries assumed to exist in the surrounding context comments: keep comments if non-zero (default is 0) subset: Canonical XML subsetting resulting from XPath (default is []) unsuppressedPrefixes: do exclusive C14N, and this specifies the prefixes that should be inherited.
true
2,679,597
def subclass_exception(name, parents, module, attached_to=None): """ Create exception subclass. If 'attached_to' is supplied, the exception will be created in a way that allows it to be pickled, assuming the returned exception class will be added as an attribute to the 'attached_to' class. """ class_dict = {'__module__': module} if attached_to is not None: def __reduce__(self): # Exceptions are special - they've got state that isn't # in self.__dict__. We assume it is all in self.args. return (unpickle_inner_exception, (attached_to, name), self.args) def __setstate__(self, args): self.args = args class_dict['__reduce__'] = __reduce__ class_dict['__setstate__'] = __setstate__ return type(name, parents, class_dict)
[ "def", "subclass_exception", "(", "name", ",", "parents", ",", "module", ",", "attached_to", "=", "None", ")", ":", "class_dict", "=", "{", "'__module__'", ":", "module", "}", "if", "attached_to", "is", "not", "None", ":", "def", "__reduce__", "(", "self", ")", ":", "return", "(", "unpickle_inner_exception", ",", "(", "attached_to", ",", "name", ")", ",", "self", ".", "args", ")", "def", "__setstate__", "(", "self", ",", "args", ")", ":", "self", ".", "args", "=", "args", "class_dict", "[", "'__reduce__'", "]", "=", "__reduce__", "class_dict", "[", "'__setstate__'", "]", "=", "__setstate__", "return", "type", "(", "name", ",", "parents", ",", "class_dict", ")" ]
python
Create exception subclass. If 'attached_to' is supplied, the exception will be created in a way that allows it to be pickled, assuming the returned exception class will be added as an attribute to the 'attached_to' class.
true
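A small usage sketch of the simple (non-pickling) case, assuming the subclass_exception function from the snippet above is in scope; the class name and message are made up.

DoesNotExist = subclass_exception('DoesNotExist', (Exception,), __name__)

try:
    raise DoesNotExist('missing row')
except DoesNotExist as exc:
    print(type(exc).__name__, exc)  # -> DoesNotExist missing row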
2,679,740
def get_supported_platform(): """Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly. """ plat = get_build_platform(); m = macosVersionString.match(plat) if m is not None and sys.platform == "darwin": try: plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) except ValueError: pass # not Mac OS X return plat
[ "def", "get_supported_platform", "(", ")", ":", "plat", "=", "get_build_platform", "(", ")", ";", "m", "=", "macosVersionString", ".", "match", "(", "plat", ")", "if", "m", "is", "not", "None", "and", "sys", ".", "platform", "==", "\"darwin\"", ":", "try", ":", "plat", "=", "'macosx-%s-%s'", "%", "(", "'.'", ".", "join", "(", "_macosx_vers", "(", ")", "[", ":", "2", "]", ")", ",", "m", ".", "group", "(", "3", ")", ")", "except", "ValueError", ":", "pass", "return", "plat" ]
python
Return this platform's maximum compatible version. distutils.util.get_platform() normally reports the minimum version of Mac OS X that would be required to *use* extensions produced by distutils. But what we want when checking compatibility is to know the version of Mac OS X that we are *running*. To allow usage of packages that explicitly require a newer version of Mac OS X, we must also know the current version of the OS. If this condition occurs for any other platform with a version in its platform strings, this function should be extended accordingly.
true
2,679,742
def get_importer(path_item): """Retrieve a PEP 302 "importer" for the given path item If there is no importer, this returns a wrapper around the builtin import machinery. The returned importer is only cached if it was created by a path hook. """ try: importer = sys.path_importer_cache[path_item] except KeyError: for hook in sys.path_hooks: try: importer = hook(path_item) except ImportError: pass else: break else: importer = None sys.path_importer_cache.setdefault(path_item,importer) if importer is None: try: importer = ImpWrapper(path_item) except ImportError: pass return importer
[ "def", "get_importer", "(", "path_item", ")", ":", "try", ":", "importer", "=", "sys", ".", "path_importer_cache", "[", "path_item", "]", "except", "KeyError", ":", "for", "hook", "in", "sys", ".", "path_hooks", ":", "try", ":", "importer", "=", "hook", "(", "path_item", ")", "except", "ImportError", ":", "pass", "else", ":", "break", "else", ":", "importer", "=", "None", "sys", ".", "path_importer_cache", ".", "setdefault", "(", "path_item", ",", "importer", ")", "if", "importer", "is", "None", ":", "try", ":", "importer", "=", "ImpWrapper", "(", "path_item", ")", "except", "ImportError", ":", "pass", "return", "importer" ]
python
Retrieve a PEP 302 "importer" for the given path item If there is no importer, this returns a wrapper around the builtin import machinery. The returned importer is only cached if it was created by a path hook.
true
2,679,743
def StringIO(*args, **kw): """Thunk to load the real StringIO on demand""" global StringIO try: from cStringIO import StringIO except ImportError: from StringIO import StringIO return StringIO(*args,**kw)
[ "def", "StringIO", "(", "*", "args", ",", "**", "kw", ")", ":", "global", "StringIO", "try", ":", "from", "cStringIO", "import", "StringIO", "except", "ImportError", ":", "from", "StringIO", "import", "StringIO", "return", "StringIO", "(", "*", "args", ",", "**", "kw", ")" ]
python
Thunk to load the real StringIO on demand
true
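The snippet rebinds the module-level name to the real class on first use, so later calls skip the thunk entirely. A Python 3 analogue of the same self-replacing pattern (a sketch, not the original code) could look like this:

def LazyStringIO(*args, **kw):
    # First call: import the real class, rebind this module-level name to it,
    # then delegate. Subsequent lookups of LazyStringIO hit io.StringIO directly.
    global LazyStringIO
    from io import StringIO as LazyStringIO
    return LazyStringIO(*args, **kw)

buf = LazyStringIO('hello')
print(buf.read())  # -> hello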
2,679,747
def _override_setuptools(req): """Return True when distribute wants to override a setuptools dependency. We want to override when the requirement is setuptools and the version is a variant of 0.6. """ if req.project_name == 'setuptools': if not len(req.specs): # Just setuptools: ok return True for comparator, version in req.specs: if comparator in ['==', '>=', '>']: if '0.7' in version: # We want some setuptools not from the 0.6 series. return False return True return False
[ "def", "_override_setuptools", "(", "req", ")", ":", "if", "req", ".", "project_name", "==", "'setuptools'", ":", "if", "not", "len", "(", "req", ".", "specs", ")", ":", "return", "True", "for", "comparator", ",", "version", "in", "req", ".", "specs", ":", "if", "comparator", "in", "[", "'=='", ",", "'>='", ",", "'>'", "]", ":", "if", "'0.7'", "in", "version", ":", "return", "False", "return", "True", "return", "False" ]
python
Return True when distribute wants to override a setuptools dependency. We want to override when the requirement is setuptools and the version is a variant of 0.6.
true
2,680,544
def passwd_check(hashed_passphrase, passphrase): """Verify that a given passphrase matches its hashed version. Parameters ---------- hashed_passphrase : str Hashed password, in the format returned by `passwd`. passphrase : str Passphrase to validate. Returns ------- valid : bool True if the passphrase matches the hash. Examples -------- In [1]: from IPython.lib.security import passwd_check In [2]: passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', ...: 'mypassword') Out[2]: True In [3]: passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', ...: 'anotherpassword') Out[3]: False """ try: algorithm, salt, pw_digest = hashed_passphrase.split(':', 2) except (ValueError, TypeError): return False try: h = hashlib.new(algorithm) except ValueError: return False if len(pw_digest) == 0: return False h.update(cast_bytes(passphrase, 'utf-8') + str_to_bytes(salt, 'ascii')) return h.hexdigest() == pw_digest
[ "def", "passwd_check", "(", "hashed_passphrase", ",", "passphrase", ")", ":", "try", ":", "algorithm", ",", "salt", ",", "pw_digest", "=", "hashed_passphrase", ".", "split", "(", "':'", ",", "2", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "False", "try", ":", "h", "=", "hashlib", ".", "new", "(", "algorithm", ")", "except", "ValueError", ":", "return", "False", "if", "len", "(", "pw_digest", ")", "==", "0", ":", "return", "False", "h", ".", "update", "(", "cast_bytes", "(", "passphrase", ",", "'utf-8'", ")", "+", "str_to_bytes", "(", "salt", ",", "'ascii'", ")", ")", "return", "h", ".", "hexdigest", "(", ")", "==", "pw_digest" ]
python
Verify that a given passphrase matches its hashed version. Parameters ---------- hashed_passphrase : str Hashed password, in the format returned by `passwd`. passphrase : str Passphrase to validate. Returns ------- valid : bool True if the passphrase matches the hash. Examples -------- In [1]: from IPython.lib.security import passwd_check In [2]: passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', ...: 'mypassword') Out[2]: True In [3]: passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', ...: 'anotherpassword') Out[3]: False
true
2,680,565
def ajax_editable_boolean(attr, short_description): """ Convenience function: Assign the return value of this method to a variable of your ModelAdmin class and put the variable name into list_display. Example:: class MyTreeEditor(TreeEditor): list_display = ('__unicode__', 'active_toggle') active_toggle = ajax_editable_boolean('active', _('is active')) """ def _fn(self, item): return ajax_editable_boolean_cell(item, attr) _fn.allow_tags = True _fn.short_description = short_description _fn.editable_boolean_field = attr return _fn
[ "def", "ajax_editable_boolean", "(", "attr", ",", "short_description", ")", ":", "def", "_fn", "(", "self", ",", "item", ")", ":", "return", "ajax_editable_boolean_cell", "(", "item", ",", "attr", ")", "_fn", ".", "allow_tags", "=", "True", "_fn", ".", "short_description", "=", "short_description", "_fn", ".", "editable_boolean_field", "=", "attr", "return", "_fn" ]
python
Convenience function: Assign the return value of this method to a variable of your ModelAdmin class and put the variable name into list_display. Example:: class MyTreeEditor(TreeEditor): list_display = ('__unicode__', 'active_toggle') active_toggle = ajax_editable_boolean('active', _('is active'))
true
2,680,584
def processor_for(content_model_or_slug, exact_page=False): """ Decorator that registers the decorated function as a page processor for the given content model or slug. When a page exists that forms the prefix of custom urlpatterns in a project (eg: the blog page and app), the page will be added to the template context. Passing in ``True`` for the ``exact_page`` arg, will ensure that the page processor is not run in this situation, requiring that the loaded page object is for the exact URL currently being viewed. """ content_model = None slug = "" if isinstance(content_model_or_slug, (str, _str)): try: parts = content_model_or_slug.split(".", 1) content_model = apps.get_model(*parts) except (TypeError, ValueError, LookupError): slug = content_model_or_slug elif issubclass(content_model_or_slug, Page): content_model = content_model_or_slug else: raise TypeError("%s is not a valid argument for page_processor, " "which should be a model subclass of Page in class " "or string form (app.model), or a valid slug" % content_model_or_slug) def decorator(func): parts = (func, exact_page) if content_model: model_name = content_model._meta.object_name.lower() processors[model_name].insert(0, parts) else: processors["slug:%s" % slug].insert(0, parts) return func return decorator
[ "def", "processor_for", "(", "content_model_or_slug", ",", "exact_page", "=", "False", ")", ":", "content_model", "=", "None", "slug", "=", "\"\"", "if", "isinstance", "(", "content_model_or_slug", ",", "(", "str", ",", "_str", ")", ")", ":", "try", ":", "parts", "=", "content_model_or_slug", ".", "split", "(", "\".\"", ",", "1", ")", "content_model", "=", "apps", ".", "get_model", "(", "*", "parts", ")", "except", "(", "TypeError", ",", "ValueError", ",", "LookupError", ")", ":", "slug", "=", "content_model_or_slug", "elif", "issubclass", "(", "content_model_or_slug", ",", "Page", ")", ":", "content_model", "=", "content_model_or_slug", "else", ":", "raise", "TypeError", "(", "\"%s is not a valid argument for page_processor, \"", "\"which should be a model subclass of Page in class \"", "\"or string form (app.model), or a valid slug\"", "%", "content_model_or_slug", ")", "def", "decorator", "(", "func", ")", ":", "parts", "=", "(", "func", ",", "exact_page", ")", "if", "content_model", ":", "model_name", "=", "content_model", ".", "_meta", ".", "object_name", ".", "lower", "(", ")", "processors", "[", "model_name", "]", ".", "insert", "(", "0", ",", "parts", ")", "else", ":", "processors", "[", "\"slug:%s\"", "%", "slug", "]", ".", "insert", "(", "0", ",", "parts", ")", "return", "func", "return", "decorator" ]
python
Decorator that registers the decorated function as a page processor for the given content model or slug. When a page exists that forms the prefix of custom urlpatterns in a project (eg: the blog page and app), the page will be added to the template context. Passing in ``True`` for the ``exact_page`` arg, will ensure that the page processor is not run in this situation, requiring that the loaded page object is for the exact URL currently being viewed.
true
2,680,597
def make_color_table(in_class): """Build a set of color attributes in a class. Helper function for building the *TermColors classes.""" for name,value in color_templates: setattr(in_class,name,in_class._base % value)
[ "def", "make_color_table", "(", "in_class", ")", ":", "for", "name", ",", "value", "in", "color_templates", ":", "setattr", "(", "in_class", ",", "name", ",", "in_class", ".", "_base", "%", "value", ")" ]
python
Build a set of color attributes in a class. Helper function for building the *TermColors classes.
true
2,680,841
def find_command(cmd, paths=None, pathext=None): """Searches the PATH for the given command and returns its path""" if paths is None: paths = os.environ.get('PATH', '').split(os.pathsep) if isinstance(paths, six.string_types): paths = [paths] # check if there are funny path extensions for executables, e.g. Windows if pathext is None: pathext = get_pathext() pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)] # don't use extensions if the command ends with one of them if os.path.splitext(cmd)[1].lower() in pathext: pathext = [''] # check if we find the command on PATH for path in paths: # try without extension first cmd_path = os.path.join(path, cmd) for ext in pathext: # then including the extension cmd_path_ext = cmd_path + ext if os.path.isfile(cmd_path_ext): return cmd_path_ext if os.path.isfile(cmd_path): return cmd_path raise BadCommand('Cannot find command %r' % cmd)
[ "def", "find_command", "(", "cmd", ",", "paths", "=", "None", ",", "pathext", "=", "None", ")", ":", "if", "paths", "is", "None", ":", "paths", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", ".", "split", "(", "os", ".", "pathsep", ")", "if", "isinstance", "(", "paths", ",", "six", ".", "string_types", ")", ":", "paths", "=", "[", "paths", "]", "if", "pathext", "is", "None", ":", "pathext", "=", "get_pathext", "(", ")", "pathext", "=", "[", "ext", "for", "ext", "in", "pathext", ".", "lower", "(", ")", ".", "split", "(", "os", ".", "pathsep", ")", "if", "len", "(", "ext", ")", "]", "if", "os", ".", "path", ".", "splitext", "(", "cmd", ")", "[", "1", "]", ".", "lower", "(", ")", "in", "pathext", ":", "pathext", "=", "[", "''", "]", "for", "path", "in", "paths", ":", "cmd_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "cmd", ")", "for", "ext", "in", "pathext", ":", "cmd_path_ext", "=", "cmd_path", "+", "ext", "if", "os", ".", "path", ".", "isfile", "(", "cmd_path_ext", ")", ":", "return", "cmd_path_ext", "if", "os", ".", "path", ".", "isfile", "(", "cmd_path", ")", ":", "return", "cmd_path", "raise", "BadCommand", "(", "'Cannot find command %r'", "%", "cmd", ")" ]
python
Searches the PATH for the given command and returns its path
true
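find_command depends on helpers that are not shown here (get_pathext, BadCommand, six). For comparison, the standard library performs the same PATH / PATHEXT lookup; this self-contained sketch is not the implementation above, just the equivalent idea:

import shutil

# shutil.which searches PATH (and PATHEXT on Windows) much like find_command does,
# but returns None instead of raising when the command is missing.
path = shutil.which('python3') or shutil.which('python')
print(path)  # e.g. /usr/bin/python3, or None if neither is on PATH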
2,680,842
def normalize_path(path): """ Convert a path to its canonical, case-normalized, absolute version. """ return os.path.normcase(os.path.realpath(os.path.expanduser(path)))
[ "def", "normalize_path", "(", "path", ")", ":", "return", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "path", ")", ")", ")" ]
python
Convert a path to its canonical, case-normalized, absolute version.
true
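A quick sketch of what the normalization does, assuming the normalize_path function from the snippet above is in scope; the exact output depends on the machine.

# Expands '~', resolves symlinks and '..' via realpath, then normalizes case
# (case normalization is a no-op on POSIX, lowercasing on Windows).
print(normalize_path('~/projects/../projects/app'))
# e.g. -> /home/alice/projects/app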
2,680,854
def check_nsp(dist, attr, value): """Verify that namespace packages are valid""" assert_string_list(dist,attr,value) for nsp in value: if not dist.has_contents_for(nsp): raise DistutilsSetupError( "Distribution contains no modules or packages for " + "namespace package %r" % nsp ) if '.' in nsp: parent = '.'.join(nsp.split('.')[:-1]) if parent not in value: distutils.log.warn( "%r is declared as a package namespace, but %r is not:" " please correct this in setup.py", nsp, parent )
[ "def", "check_nsp", "(", "dist", ",", "attr", ",", "value", ")", ":", "assert_string_list", "(", "dist", ",", "attr", ",", "value", ")", "for", "nsp", "in", "value", ":", "if", "not", "dist", ".", "has_contents_for", "(", "nsp", ")", ":", "raise", "DistutilsSetupError", "(", "\"Distribution contains no modules or packages for \"", "+", "\"namespace package %r\"", "%", "nsp", ")", "if", "'.'", "in", "nsp", ":", "parent", "=", "'.'", ".", "join", "(", "nsp", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "if", "parent", "not", "in", "value", ":", "distutils", ".", "log", ".", "warn", "(", "\"%r is declared as a package namespace, but %r is not:\"", "\" please correct this in setup.py\"", ",", "nsp", ",", "parent", ")" ]
python
Verify that namespace packages are valid
true
2,680,856
def check_entry_points(dist, attr, value): """Verify that entry_points map is parseable""" try: pkg_resources.EntryPoint.parse_map(value) except ValueError, e: raise DistutilsSetupError(e)
[ "def", "check_entry_points", "(", "dist", ",", "attr", ",", "value", ")", ":", "try", ":", "pkg_resources", ".", "EntryPoint", ".", "parse_map", "(", "value", ")", "except", "ValueError", ",", "e", ":", "raise", "DistutilsSetupError", "(", "e", ")" ]
python
Verify that entry_points map is parseable
true
2,681,027
def last_blank(src): """Determine if the input source ends in a blank. A blank is either a newline or a line consisting of whitespace. Parameters ---------- src : string A single or multiline string. """ if not src: return False ll = src.splitlines()[-1] return (ll == '') or ll.isspace()
[ "def", "last_blank", "(", "src", ")", ":", "if", "not", "src", ":", "return", "False", "ll", "=", "src", ".", "splitlines", "(", ")", "[", "-", "1", "]", "return", "(", "ll", "==", "''", ")", "or", "ll", ".", "isspace", "(", ")" ]
python
Determine if the input source ends in a blank. A blank is either a newline or a line consisting of whitespace. Parameters ---------- src : string A single or multiline string.
true
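A usage sketch, assuming the last_blank function from the snippet above is in scope:

print(last_blank('x = 1'))        # False -- last line has content
print(last_blank('x = 1\n\n'))    # True  -- ends with an empty line
print(last_blank('x = 1\n   '))   # True  -- last line is only whitespace
print(last_blank(''))             # False -- empty input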
2,681,028
def last_two_blanks(src): """Determine if the input source ends in two blanks. A blank is either a newline or a line consisting of whitespace. Parameters ---------- src : string A single or multiline string. """ if not src: return False # The logic here is tricky: I couldn't get a regexp to work and pass all # the tests, so I took a different approach: split the source by lines, # grab the last two and prepend '###\n' as a stand-in for whatever was in # the body before the last two lines. Then, with that structure, it's # possible to analyze with two regexps. Not the most elegant solution, but # it works. If anyone tries to change this logic, make sure to validate # the whole test suite first! new_src = '\n'.join(['###\n'] + src.splitlines()[-2:]) return (bool(last_two_blanks_re.match(new_src)) or bool(last_two_blanks_re2.match(new_src)) )
[ "def", "last_two_blanks", "(", "src", ")", ":", "if", "not", "src", ":", "return", "False", "new_src", "=", "'\\n'", ".", "join", "(", "[", "'###\\n'", "]", "+", "src", ".", "splitlines", "(", ")", "[", "-", "2", ":", "]", ")", "return", "(", "bool", "(", "last_two_blanks_re", ".", "match", "(", "new_src", ")", ")", "or", "bool", "(", "last_two_blanks_re2", ".", "match", "(", "new_src", ")", ")", ")" ]
python
Determine if the input source ends in two blanks. A blank is either a newline or a line consisting of whitespace. Parameters ---------- src : string A single or multiline string.
true
2,681,667
def confirmation_view(template, doc="Display a confirmation view."): """ Confirmation view generator for the "comment was posted/flagged/deleted/approved" views. """ def confirmed(request): comment = None if 'c' in request.GET: try: comment = comments.get_model().objects.get(pk=request.GET['c']) except (ObjectDoesNotExist, ValueError): pass return render(request, template, {'comment': comment}) confirmed.__doc__ = textwrap.dedent("""\ %s Templates: :template:`%s`` Context: comment The posted comment """ % (doc, template) ) return confirmed
[ "def", "confirmation_view", "(", "template", ",", "doc", "=", "\"Display a confirmation view.\"", ")", ":", "def", "confirmed", "(", "request", ")", ":", "comment", "=", "None", "if", "'c'", "in", "request", ".", "GET", ":", "try", ":", "comment", "=", "comments", ".", "get_model", "(", ")", ".", "objects", ".", "get", "(", "pk", "=", "request", ".", "GET", "[", "'c'", "]", ")", "except", "(", "ObjectDoesNotExist", ",", "ValueError", ")", ":", "pass", "return", "render", "(", "request", ",", "template", ",", "{", "'comment'", ":", "comment", "}", ")", "confirmed", ".", "__doc__", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n %s\n Templates: :template:`%s``\n Context:\n comment\n The posted comment\n \"\"\"", "%", "(", "doc", ",", "template", ")", ")", "return", "confirmed" ]
python
Confirmation view generator for the "comment was posted/flagged/deleted/approved" views.
true
2,681,867
def absolute_urls(html): """ Converts relative URLs into absolute URLs. Used for RSS feeds to provide more complete HTML for item descriptions, but could also be used as a general richtext filter. """ from bs4 import BeautifulSoup from yacms.core.request import current_request request = current_request() if request is not None: dom = BeautifulSoup(html, "html.parser") for tag, attr in ABSOLUTE_URL_TAGS.items(): for node in dom.findAll(tag): url = node.get(attr, "") if url: node[attr] = request.build_absolute_uri(url) html = str(dom) return html
[ "def", "absolute_urls", "(", "html", ")", ":", "from", "bs4", "import", "BeautifulSoup", "from", "yacms", ".", "core", ".", "request", "import", "current_request", "request", "=", "current_request", "(", ")", "if", "request", "is", "not", "None", ":", "dom", "=", "BeautifulSoup", "(", "html", ",", "\"html.parser\"", ")", "for", "tag", ",", "attr", "in", "ABSOLUTE_URL_TAGS", ".", "items", "(", ")", ":", "for", "node", "in", "dom", ".", "findAll", "(", "tag", ")", ":", "url", "=", "node", ".", "get", "(", "attr", ",", "\"\"", ")", "if", "url", ":", "node", "[", "attr", "]", "=", "request", ".", "build_absolute_uri", "(", "url", ")", "html", "=", "str", "(", "dom", ")", "return", "html" ]
python
Converts relative URLs into absolute URLs. Used for RSS feeds to provide more complete HTML for item descriptions, but could also be used as a general richtext filter.
true
2,682,219
def split_user_input(line, pattern=None): """Split user input into initial whitespace, escape character, function part and the rest. """ # We need to ensure that the rest of this routine deals only with unicode encoding = get_stream_enc(sys.stdin, 'utf-8') line = py3compat.cast_unicode(line, encoding) if pattern is None: pattern = line_split match = pattern.match(line) if not match: # print "match failed for line '%s'" % line try: ifun, the_rest = line.split(None,1) except ValueError: # print "split failed for line '%s'" % line ifun, the_rest = line, u'' pre = re.match('^(\s*)(.*)',line).groups()[0] esc = "" else: pre, esc, ifun, the_rest = match.groups() #print 'line:<%s>' % line # dbg #print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg return pre, esc or '', ifun.strip(), the_rest.lstrip()
[ "def", "split_user_input", "(", "line", ",", "pattern", "=", "None", ")", ":", "encoding", "=", "get_stream_enc", "(", "sys", ".", "stdin", ",", "'utf-8'", ")", "line", "=", "py3compat", ".", "cast_unicode", "(", "line", ",", "encoding", ")", "if", "pattern", "is", "None", ":", "pattern", "=", "line_split", "match", "=", "pattern", ".", "match", "(", "line", ")", "if", "not", "match", ":", "try", ":", "ifun", ",", "the_rest", "=", "line", ".", "split", "(", "None", ",", "1", ")", "except", "ValueError", ":", "ifun", ",", "the_rest", "=", "line", ",", "u''", "pre", "=", "re", ".", "match", "(", "'^(\\s*)(.*)'", ",", "line", ")", ".", "groups", "(", ")", "[", "0", "]", "esc", "=", "\"\"", "else", ":", "pre", ",", "esc", ",", "ifun", ",", "the_rest", "=", "match", ".", "groups", "(", ")", "return", "pre", ",", "esc", "or", "''", ",", "ifun", ".", "strip", "(", ")", ",", "the_rest", ".", "lstrip", "(", ")" ]
python
Split user input into initial whitespace, escape character, function part and the rest.
true
2,682,743
def defaulterrorhandler(connection, cursor, errorclass, errorvalue): """ If cursor is not None, (errorclass, errorvalue) is appended to cursor.messages; otherwise it is appended to connection.messages. Then errorclass is raised with errorvalue as the value. You can override this with your own error handler by assigning it to the instance. """ error = errorclass, errorvalue if cursor: cursor.messages.append(error) else: connection.messages.append(error) del cursor del connection raise errorclass, errorvalue
[ "def", "defaulterrorhandler", "(", "connection", ",", "cursor", ",", "errorclass", ",", "errorvalue", ")", ":", "error", "=", "errorclass", ",", "errorvalue", "if", "cursor", ":", "cursor", ".", "messages", ".", "append", "(", "error", ")", "else", ":", "connection", ".", "messages", ".", "append", "(", "error", ")", "del", "cursor", "del", "connection", "raise", "errorclass", ",", "errorvalue" ]
python
If cursor is not None, (errorclass, errorvalue) is appended to cursor.messages; otherwise it is appended to connection.messages. Then errorclass is raised with errorvalue as the value. You can override this with your own error handler by assigning it to the instance.
true
2,683,012
def unique_slug(queryset, slug_field, slug): """ Ensures a slug is unique for the given queryset, appending an integer to its end until the slug is unique. """ i = 0 while True: if i > 0: if i > 1: slug = slug.rsplit("-", 1)[0] slug = "%s-%s" % (slug, i) try: queryset.get(**{slug_field: slug}) except ObjectDoesNotExist: break i += 1 return slug
[ "def", "unique_slug", "(", "queryset", ",", "slug_field", ",", "slug", ")", ":", "i", "=", "0", "while", "True", ":", "if", "i", ">", "0", ":", "if", "i", ">", "1", ":", "slug", "=", "slug", ".", "rsplit", "(", "\"-\"", ",", "1", ")", "[", "0", "]", "slug", "=", "\"%s-%s\"", "%", "(", "slug", ",", "i", ")", "try", ":", "queryset", ".", "get", "(", "**", "{", "slug_field", ":", "slug", "}", ")", "except", "ObjectDoesNotExist", ":", "break", "i", "+=", "1", "return", "slug" ]
python
Ensures a slug is unique for the given queryset, appending an integer to its end until the slug is unique.
true
2,683,013
def next_url(request): """ Returns URL to redirect to from the ``next`` param in the request. """ next = request.GET.get("next", request.POST.get("next", "")) host = request.get_host() return next if next and is_safe_url(next, host=host) else None
[ "def", "next_url", "(", "request", ")", ":", "next", "=", "request", ".", "GET", ".", "get", "(", "\"next\"", ",", "request", ".", "POST", ".", "get", "(", "\"next\"", ",", "\"\"", ")", ")", "host", "=", "request", ".", "get_host", "(", ")", "return", "next", "if", "next", "and", "is_safe_url", "(", "next", ",", "host", "=", "host", ")", "else", "None" ]
python
Returns URL to redirect to from the ``next`` param in the request.
true
2,683,802
def setastest(tf=True): """ Signals to nose that this function is or is not a test. Parameters ---------- tf : bool If True, specifies that the decorated callable is a test. If False, specifies that the decorated callable is not a test. Default is True. Notes ----- This decorator can't use the nose namespace, because it can be called from a non-test module. See also ``istest`` and ``nottest`` in ``nose.tools``. Examples -------- `setastest` can be used in the following way:: from numpy.testing.decorators import setastest @setastest(False) def func_with_test_in_name(arg1, arg2): pass """ def set_test(t): t.__test__ = tf return t return set_test
[ "def", "setastest", "(", "tf", "=", "True", ")", ":", "def", "set_test", "(", "t", ")", ":", "t", ".", "__test__", "=", "tf", "return", "t", "return", "set_test" ]
python
Signals to nose that this function is or is not a test. Parameters ---------- tf : bool If True, specifies that the decorated callable is a test. If False, specifies that the decorated callable is not a test. Default is True. Notes ----- This decorator can't use the nose namespace, because it can be called from a non-test module. See also ``istest`` and ``nottest`` in ``nose.tools``. Examples -------- `setastest` can be used in the following way:: from numpy.testing.decorators import setastest @setastest(False) def func_with_test_in_name(arg1, arg2): pass
true
2,683,803
def skipif(skip_condition, msg=None): """ Make function raise SkipTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- skip_condition : bool or callable Flag to determine whether to skip the decorated test. msg : str, optional Message to give on raising a SkipTest exception. Default is None. Returns ------- decorator : function Decorator which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata. """ def skip_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose # Allow for both boolean or callable skip conditions. if callable(skip_condition): skip_val = lambda : skip_condition() else: skip_val = lambda : skip_condition def get_msg(func,msg=None): """Skip message with information about function being skipped.""" if msg is None: out = 'Test skipped due to test condition' else: out = '\n'+msg return "Skipping test: %s%s" % (func.__name__,out) # We need to define *two* skippers because Python doesn't allow both # return with value and yield inside the same function. def skipper_func(*args, **kwargs): """Skipper for normal test functions.""" if skip_val(): raise nose.SkipTest(get_msg(f,msg)) else: return f(*args, **kwargs) def skipper_gen(*args, **kwargs): """Skipper for test generators.""" if skip_val(): raise nose.SkipTest(get_msg(f,msg)) else: for x in f(*args, **kwargs): yield x # Choose the right skipper to use when building the actual decorator. if nose.util.isgenerator(f): skipper = skipper_gen else: skipper = skipper_func return nose.tools.make_decorator(f)(skipper) return skip_decorator
[ "def", "skipif", "(", "skip_condition", ",", "msg", "=", "None", ")", ":", "def", "skip_decorator", "(", "f", ")", ":", "import", "nose", "if", "callable", "(", "skip_condition", ")", ":", "skip_val", "=", "lambda", ":", "skip_condition", "(", ")", "else", ":", "skip_val", "=", "lambda", ":", "skip_condition", "def", "get_msg", "(", "func", ",", "msg", "=", "None", ")", ":", "if", "msg", "is", "None", ":", "out", "=", "'Test skipped due to test condition'", "else", ":", "out", "=", "'\\n'", "+", "msg", "return", "\"Skipping test: %s%s\"", "%", "(", "func", ".", "__name__", ",", "out", ")", "def", "skipper_func", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "skip_val", "(", ")", ":", "raise", "nose", ".", "SkipTest", "(", "get_msg", "(", "f", ",", "msg", ")", ")", "else", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "def", "skipper_gen", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "skip_val", "(", ")", ":", "raise", "nose", ".", "SkipTest", "(", "get_msg", "(", "f", ",", "msg", ")", ")", "else", ":", "for", "x", "in", "f", "(", "*", "args", ",", "**", "kwargs", ")", ":", "yield", "x", "if", "nose", ".", "util", ".", "isgenerator", "(", "f", ")", ":", "skipper", "=", "skipper_gen", "else", ":", "skipper", "=", "skipper_func", "return", "nose", ".", "tools", ".", "make_decorator", "(", "f", ")", "(", "skipper", ")", "return", "skip_decorator" ]
python
Make function raise SkipTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- skip_condition : bool or callable Flag to determine whether to skip the decorated test. msg : str, optional Message to give on raising a SkipTest exception. Default is None. Returns ------- decorator : function Decorator which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata.
true
2,683,804
def knownfailureif(fail_condition, msg=None): """ Make function raise KnownFailureTest exception if given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- fail_condition : bool or callable Flag to determine whether to mark the decorated test as a known failure (if True) or not (if False). msg : str, optional Message to give on raising a KnownFailureTest exception. Default is None. Returns ------- decorator : function Decorator, which, when applied to a function, causes SkipTest to be raised when `skip_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata. """ if msg is None: msg = 'Test skipped due to known failure' # Allow for both boolean or callable known failure conditions. if callable(fail_condition): fail_val = lambda : fail_condition() else: fail_val = lambda : fail_condition def knownfail_decorator(f): # Local import to avoid a hard nose dependency and only incur the # import time overhead at actual test-time. import nose def knownfailer(*args, **kwargs): if fail_val(): raise KnownFailureTest, msg else: return f(*args, **kwargs) return nose.tools.make_decorator(f)(knownfailer) return knownfail_decorator
[ "def", "knownfailureif", "(", "fail_condition", ",", "msg", "=", "None", ")", ":", "if", "msg", "is", "None", ":", "msg", "=", "'Test skipped due to known failure'", "if", "callable", "(", "fail_condition", ")", ":", "fail_val", "=", "lambda", ":", "fail_condition", "(", ")", "else", ":", "fail_val", "=", "lambda", ":", "fail_condition", "def", "knownfail_decorator", "(", "f", ")", ":", "import", "nose", "def", "knownfailer", "(", "*", "args", ",", "**", "kwargs", ")", ":", "if", "fail_val", "(", ")", ":", "raise", "KnownFailureTest", ",", "msg", "else", ":", "return", "f", "(", "*", "args", ",", "**", "kwargs", ")", "return", "nose", ".", "tools", ".", "make_decorator", "(", "f", ")", "(", "knownfailer", ")", "return", "knownfail_decorator" ]
python
Make function raise KnownFailureTest exception if a given condition is true. If the condition is a callable, it is used at runtime to dynamically make the decision. This is useful for tests that may require costly imports, to delay the cost until the test suite is actually executed. Parameters ---------- fail_condition : bool or callable Flag to determine whether to mark the decorated test as a known failure (if True) or not (if False). msg : str, optional Message to give on raising a KnownFailureTest exception. Default is None. Returns ------- decorator : function Decorator which, when applied to a function, causes KnownFailureTest to be raised when `fail_condition` is True, and the function to be called normally otherwise. Notes ----- The decorator itself is decorated with the ``nose.tools.make_decorator`` function in order to transmit function name, and various other metadata.
true
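An illustrative use of `knownfailureif` (hypothetical test; assumes the decorator, the KnownFailureTest exception, and nose are available in scope).

# Marked as a known failure: 2.675 is not exactly representable in binary floats,
# so the rounded value comes out as 2.67 and the assertion fails.
@knownfailureif(True, 'fails until the rounding expectation is revised')
def test_rounding():
    assert round(2.675, 2) == 2.68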
2,683,821
def list_profiles_in(path): """list profiles in a given root directory""" files = os.listdir(path) profiles = [] for f in files: full_path = os.path.join(path, f) if os.path.isdir(full_path) and f.startswith('profile_'): profiles.append(f.split('_',1)[-1]) return profiles
[ "def", "list_profiles_in", "(", "path", ")", ":", "files", "=", "os", ".", "listdir", "(", "path", ")", "profiles", "=", "[", "]", "for", "f", "in", "files", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "f", ")", "if", "os", ".", "path", ".", "isdir", "(", "full_path", ")", "and", "f", ".", "startswith", "(", "'profile_'", ")", ":", "profiles", ".", "append", "(", "f", ".", "split", "(", "'_'", ",", "1", ")", "[", "-", "1", "]", ")", "return", "profiles" ]
python
list profiles in a given root directory
true
2,683,822
def list_bundled_profiles(): """list profiles that are bundled with IPython.""" path = os.path.join(get_ipython_package_dir(), u'config', u'profile') files = os.listdir(path) profiles = [] for profile in files: full_path = os.path.join(path, profile) if os.path.isdir(full_path) and profile != "__pycache__": profiles.append(profile) return profiles
[ "def", "list_bundled_profiles", "(", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "get_ipython_package_dir", "(", ")", ",", "u'config'", ",", "u'profile'", ")", "files", "=", "os", ".", "listdir", "(", "path", ")", "profiles", "=", "[", "]", "for", "profile", "in", "files", ":", "full_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "profile", ")", "if", "os", ".", "path", ".", "isdir", "(", "full_path", ")", "and", "profile", "!=", "\"__pycache__\"", ":", "profiles", ".", "append", "(", "profile", ")", "return", "profiles" ]
python
list profiles that are bundled with IPython.
true
2,683,896
def _bypass_ensure_directory(path, mode=0o777): """Sandbox-bypassing version of ensure_directory()""" if not WRITE_SUPPORT: raise IOError('"os.mkdir" not supported on this platform.') dirname, filename = split(path) if dirname and filename and not isdir(dirname): _bypass_ensure_directory(dirname) mkdir(dirname, mode)
[ "def", "_bypass_ensure_directory", "(", "path", ",", "mode", "=", "0o777", ")", ":", "if", "not", "WRITE_SUPPORT", ":", "raise", "IOError", "(", "'\"os.mkdir\" not supported on this platform.'", ")", "dirname", ",", "filename", "=", "split", "(", "path", ")", "if", "dirname", "and", "filename", "and", "not", "isdir", "(", "dirname", ")", ":", "_bypass_ensure_directory", "(", "dirname", ")", "mkdir", "(", "dirname", ",", "mode", ")" ]
python
Sandbox-bypassing version of ensure_directory()
true
2,684,229
def parse_link(value): """Return a list of parsed link headers proxies. i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" :rtype: list """ links = [] replace_chars = ' \'"' value = value.strip(replace_chars) if not value: return links for val in re.split(', *<', value): try: url, params = val.split(';', 1) except ValueError: url, params = val, '' link = {'url': url.strip('<> \'"')} for param in params.split(';'): try: key, value = param.split('=') except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links
[ "def", "parse_link", "(", "value", ")", ":", "links", "=", "[", "]", "replace_chars", "=", "' \\'\"'", "value", "=", "value", ".", "strip", "(", "replace_chars", ")", "if", "not", "value", ":", "return", "links", "for", "val", "in", "re", ".", "split", "(", "', *<'", ",", "value", ")", ":", "try", ":", "url", ",", "params", "=", "val", ".", "split", "(", "';'", ",", "1", ")", "except", "ValueError", ":", "url", ",", "params", "=", "val", ",", "''", "link", "=", "{", "'url'", ":", "url", ".", "strip", "(", "'<> \\'\"'", ")", "}", "for", "param", "in", "params", ".", "split", "(", "';'", ")", ":", "try", ":", "key", ",", "value", "=", "param", ".", "split", "(", "'='", ")", "except", "ValueError", ":", "break", "link", "[", "key", ".", "strip", "(", "replace_chars", ")", "]", "=", "value", ".", "strip", "(", "replace_chars", ")", "links", ".", "append", "(", "link", ")", "return", "links" ]
python
Return a list of parsed link headers. e.g. Link: <http://.../front.jpeg>; rel=front; type="image/jpeg", <http://.../back.jpeg>; rel=back; type="image/jpeg" :rtype: list
true
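A sketch of what `parse_link` returns for a header like the one in the description (URLs are illustrative; assumes the function above is in scope).

header = ('<http://example.com/front.jpeg>; rel=front; type="image/jpeg", '
          '<http://example.com/back.jpeg>; rel=back; type="image/jpeg"')
links = parse_link(header)
# links == [{'url': 'http://example.com/front.jpeg', 'rel': 'front', 'type': 'image/jpeg'},
#           {'url': 'http://example.com/back.jpeg', 'rel': 'back', 'type': 'image/jpeg'}]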
2,684,341
def sort_by(items, attr): """ General sort filter - sorts by either attribute or key. """ def key_func(item): try: return getattr(item, attr) except AttributeError: try: return item[attr] except TypeError: getattr(item, attr) # Reraise AttributeError return sorted(items, key=key_func)
[ "def", "sort_by", "(", "items", ",", "attr", ")", ":", "def", "key_func", "(", "item", ")", ":", "try", ":", "return", "getattr", "(", "item", ",", "attr", ")", "except", "AttributeError", ":", "try", ":", "return", "item", "[", "attr", "]", "except", "TypeError", ":", "getattr", "(", "item", ",", "attr", ")", "return", "sorted", "(", "items", ",", "key", "=", "key_func", ")" ]
python
General sort filter - sorts by either attribute or key.
true
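A small illustration of `sort_by` on dictionaries (hypothetical data); the same call works on objects via attribute access.

books = [{'title': 'Bleak House', 'pages': 928}, {'title': 'Hard Times', 'pages': 352}]
sort_by(books, 'pages')
# -> [{'title': 'Hard Times', 'pages': 352}, {'title': 'Bleak House', 'pages': 928}]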
2,684,342
def ifinstalled(parser, token): """ Old-style ``if`` tag that renders contents if the given app is installed. The main use case is: {% ifinstalled app_name %} {% include "app_name/template.html" %} {% endifinstalled %} so we need to manually pull out all tokens if the app isn't installed, since if we used a normal ``if`` tag with a False arg, the include tag will still try and find the template to include. """ try: tag, app = token.split_contents() except ValueError: raise TemplateSyntaxError("ifinstalled should be in the form: " "{% ifinstalled app_name %}" "{% endifinstalled %}") end_tag = "end" + tag unmatched_end_tag = 1 if app.strip("\"'") not in settings.INSTALLED_APPS: while unmatched_end_tag: token = parser.tokens.pop(0) if token.token_type == TOKEN_BLOCK: block_name = token.contents.split()[0] if block_name == tag: unmatched_end_tag += 1 if block_name == end_tag: unmatched_end_tag -= 1 parser.tokens.insert(0, token) nodelist = parser.parse((end_tag,)) parser.delete_first_token() class IfInstalledNode(Node): def render(self, context): return nodelist.render(context) return IfInstalledNode()
[ "def", "ifinstalled", "(", "parser", ",", "token", ")", ":", "try", ":", "tag", ",", "app", "=", "token", ".", "split_contents", "(", ")", "except", "ValueError", ":", "raise", "TemplateSyntaxError", "(", "\"ifinstalled should be in the form: \"", "\"{% ifinstalled app_name %}\"", "\"{% endifinstalled %}\"", ")", "end_tag", "=", "\"end\"", "+", "tag", "unmatched_end_tag", "=", "1", "if", "app", ".", "strip", "(", "\"\\\"'\"", ")", "not", "in", "settings", ".", "INSTALLED_APPS", ":", "while", "unmatched_end_tag", ":", "token", "=", "parser", ".", "tokens", ".", "pop", "(", "0", ")", "if", "token", ".", "token_type", "==", "TOKEN_BLOCK", ":", "block_name", "=", "token", ".", "contents", ".", "split", "(", ")", "[", "0", "]", "if", "block_name", "==", "tag", ":", "unmatched_end_tag", "+=", "1", "if", "block_name", "==", "end_tag", ":", "unmatched_end_tag", "-=", "1", "parser", ".", "tokens", ".", "insert", "(", "0", ",", "token", ")", "nodelist", "=", "parser", ".", "parse", "(", "(", "end_tag", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "class", "IfInstalledNode", "(", "Node", ")", ":", "def", "render", "(", "self", ",", "context", ")", ":", "return", "nodelist", ".", "render", "(", "context", ")", "return", "IfInstalledNode", "(", ")" ]
python
Old-style ``if`` tag that renders contents if the given app is installed. The main use case is: {% ifinstalled app_name %} {% include "app_name/template.html" %} {% endifinstalled %} so we need to manually pull out all tokens if the app isn't installed, since if we used a normal ``if`` tag with a False arg, the include tag will still try and find the template to include.
true
2,684,685
def run_setup(setup_script, args): """Run a distutils setup script, sandboxed in its directory""" old_dir = os.getcwd() save_argv = sys.argv[:] save_path = sys.path[:] setup_dir = os.path.abspath(os.path.dirname(setup_script)) temp_dir = os.path.join(setup_dir,'temp') if not os.path.isdir(temp_dir): os.makedirs(temp_dir) save_tmp = tempfile.tempdir save_modules = sys.modules.copy() pr_state = pkg_resources.__getstate__() try: tempfile.tempdir = temp_dir os.chdir(setup_dir) try: sys.argv[:] = [setup_script]+list(args) sys.path.insert(0, setup_dir) DirectorySandbox(setup_dir).run( lambda: execfile( "setup.py", {'__file__':setup_script, '__name__':'__main__'} ) ) except SystemExit, v: if v.args and v.args[0]: raise # Normal exit, just return finally: pkg_resources.__setstate__(pr_state) sys.modules.update(save_modules) # remove any modules imported within the sandbox del_modules = [ mod_name for mod_name in sys.modules if mod_name not in save_modules # exclude any encodings modules. See #285 and not mod_name.startswith('encodings.') ] map(sys.modules.__delitem__, del_modules) os.chdir(old_dir) sys.path[:] = save_path sys.argv[:] = save_argv tempfile.tempdir = save_tmp
[ "def", "run_setup", "(", "setup_script", ",", "args", ")", ":", "old_dir", "=", "os", ".", "getcwd", "(", ")", "save_argv", "=", "sys", ".", "argv", "[", ":", "]", "save_path", "=", "sys", ".", "path", "[", ":", "]", "setup_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "setup_script", ")", ")", "temp_dir", "=", "os", ".", "path", ".", "join", "(", "setup_dir", ",", "'temp'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "temp_dir", ")", ":", "os", ".", "makedirs", "(", "temp_dir", ")", "save_tmp", "=", "tempfile", ".", "tempdir", "save_modules", "=", "sys", ".", "modules", ".", "copy", "(", ")", "pr_state", "=", "pkg_resources", ".", "__getstate__", "(", ")", "try", ":", "tempfile", ".", "tempdir", "=", "temp_dir", "os", ".", "chdir", "(", "setup_dir", ")", "try", ":", "sys", ".", "argv", "[", ":", "]", "=", "[", "setup_script", "]", "+", "list", "(", "args", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "setup_dir", ")", "DirectorySandbox", "(", "setup_dir", ")", ".", "run", "(", "lambda", ":", "execfile", "(", "\"setup.py\"", ",", "{", "'__file__'", ":", "setup_script", ",", "'__name__'", ":", "'__main__'", "}", ")", ")", "except", "SystemExit", ",", "v", ":", "if", "v", ".", "args", "and", "v", ".", "args", "[", "0", "]", ":", "raise", "finally", ":", "pkg_resources", ".", "__setstate__", "(", "pr_state", ")", "sys", ".", "modules", ".", "update", "(", "save_modules", ")", "del_modules", "=", "[", "mod_name", "for", "mod_name", "in", "sys", ".", "modules", "if", "mod_name", "not", "in", "save_modules", "and", "not", "mod_name", ".", "startswith", "(", "'encodings.'", ")", "]", "map", "(", "sys", ".", "modules", ".", "__delitem__", ",", "del_modules", ")", "os", ".", "chdir", "(", "old_dir", ")", "sys", ".", "path", "[", ":", "]", "=", "save_path", "sys", ".", "argv", "[", ":", "]", "=", "save_argv", "tempfile", ".", "tempdir", "=", "save_tmp" ]
python
Run a distutils setup script, sandboxed in its directory
true
2,684,713
def indent(instr,nspaces=4, ntabs=0, flatten=False): """Indent a string a given number of spaces or tabstops. indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces. Parameters ---------- instr : basestring The string to be indented. nspaces : int (default: 4) The number of spaces to be indented. ntabs : int (default: 0) The number of tabs to be indented. flatten : bool (default: False) Whether to scrub existing indentation. If True, all lines will be aligned to the same indentation. If False, existing indentation will be strictly increased. Returns ------- str|unicode : string indented by ntabs and nspaces. """ if instr is None: return ind = '\t'*ntabs+' '*nspaces if flatten: pat = re.compile(r'^\s*', re.MULTILINE) else: pat = re.compile(r'^', re.MULTILINE) outstr = re.sub(pat, ind, instr) if outstr.endswith(os.linesep+ind): return outstr[:-len(ind)] else: return outstr
[ "def", "indent", "(", "instr", ",", "nspaces", "=", "4", ",", "ntabs", "=", "0", ",", "flatten", "=", "False", ")", ":", "if", "instr", "is", "None", ":", "return", "ind", "=", "'\\t'", "*", "ntabs", "+", "' '", "*", "nspaces", "if", "flatten", ":", "pat", "=", "re", ".", "compile", "(", "r'^\\s*'", ",", "re", ".", "MULTILINE", ")", "else", ":", "pat", "=", "re", ".", "compile", "(", "r'^'", ",", "re", ".", "MULTILINE", ")", "outstr", "=", "re", ".", "sub", "(", "pat", ",", "ind", ",", "instr", ")", "if", "outstr", ".", "endswith", "(", "os", ".", "linesep", "+", "ind", ")", ":", "return", "outstr", "[", ":", "-", "len", "(", "ind", ")", "]", "else", ":", "return", "outstr" ]
python
Indent a string a given number of spaces or tabstops. indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces. Parameters ---------- instr : basestring The string to be indented. nspaces : int (default: 4) The number of spaces to be indented. ntabs : int (default: 0) The number of tabs to be indented. flatten : bool (default: False) Whether to scrub existing indentation. If True, all lines will be aligned to the same indentation. If False, existing indentation will be strictly increased. Returns ------- str|unicode : string indented by ntabs and nspaces.
true
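Illustrative calls to `indent`, with the resulting strings shown in comments (assumes the function above is in scope).

indent('first\nsecond', nspaces=2)             # -> '  first\n  second'
indent('  a\n    b', nspaces=4, flatten=True)  # -> '    a\n    b' (existing indentation scrubbed)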
2,684,715
def marquee(txt='',width=78,mark='*'): """Return the input string centered in a 'marquee'. :Examples: In [16]: marquee('A test',40) Out[16]: '**************** A test ****************' In [17]: marquee('A test',40,'-') Out[17]: '---------------- A test ----------------' In [18]: marquee('A test',40,' ') Out[18]: ' A test ' """ if not txt: return (mark*width)[:width] nmark = (width-len(txt)-2)//len(mark)//2 if nmark < 0: nmark =0 marks = mark*nmark return '%s %s %s' % (marks,txt,marks)
[ "def", "marquee", "(", "txt", "=", "''", ",", "width", "=", "78", ",", "mark", "=", "'*'", ")", ":", "if", "not", "txt", ":", "return", "(", "mark", "*", "width", ")", "[", ":", "width", "]", "nmark", "=", "(", "width", "-", "len", "(", "txt", ")", "-", "2", ")", "//", "len", "(", "mark", ")", "//", "2", "if", "nmark", "<", "0", ":", "nmark", "=", "0", "marks", "=", "mark", "*", "nmark", "return", "'%s %s %s'", "%", "(", "marks", ",", "txt", ",", "marks", ")" ]
python
Return the input string centered in a 'marquee'. :Examples: In [16]: marquee('A test',40) Out[16]: '**************** A test ****************' In [17]: marquee('A test',40,'-') Out[17]: '---------------- A test ----------------' In [18]: marquee('A test',40,' ') Out[18]: ' A test '
true
2,684,716
def format_screen(strng): """Format a string for screen printing. This removes some latex-type format codes.""" # Paragraph continue par_re = re.compile(r'\\$',re.MULTILINE) strng = par_re.sub('',strng) return strng
[ "def", "format_screen", "(", "strng", ")", ":", "par_re", "=", "re", ".", "compile", "(", "r'\\\\$'", ",", "re", ".", "MULTILINE", ")", "strng", "=", "par_re", ".", "sub", "(", "''", ",", "strng", ")", "return", "strng" ]
python
Format a string for screen printing. This removes some latex-type format codes.
true
2,684,718
def wrap_paragraphs(text, ncols=80): """Wrap multiple paragraphs to fit a specified width. This is equivalent to textwrap.wrap, but with support for multiple paragraphs, as separated by empty lines. Returns ------- list of complete paragraphs, wrapped to fill `ncols` columns. """ paragraph_re = re.compile(r'\n(\s*\n)+', re.MULTILINE) text = dedent(text).strip() paragraphs = paragraph_re.split(text)[::2] # every other entry is space out_ps = [] indent_re = re.compile(r'\n\s+', re.MULTILINE) for p in paragraphs: # presume indentation that survives dedent is meaningful formatting, # so don't fill unless text is flush. if indent_re.search(p) is None: # wrap paragraph p = textwrap.fill(p, ncols) out_ps.append(p) return out_ps
[ "def", "wrap_paragraphs", "(", "text", ",", "ncols", "=", "80", ")", ":", "paragraph_re", "=", "re", ".", "compile", "(", "r'\\n(\\s*\\n)+'", ",", "re", ".", "MULTILINE", ")", "text", "=", "dedent", "(", "text", ")", ".", "strip", "(", ")", "paragraphs", "=", "paragraph_re", ".", "split", "(", "text", ")", "[", ":", ":", "2", "]", "out_ps", "=", "[", "]", "indent_re", "=", "re", ".", "compile", "(", "r'\\n\\s+'", ",", "re", ".", "MULTILINE", ")", "for", "p", "in", "paragraphs", ":", "if", "indent_re", ".", "search", "(", "p", ")", "is", "None", ":", "p", "=", "textwrap", ".", "fill", "(", "p", ",", "ncols", ")", "out_ps", ".", "append", "(", "p", ")", "return", "out_ps" ]
python
Wrap multiple paragraphs to fit a specified width. This is equivalent to textwrap.wrap, but with support for multiple paragraphs, as separated by empty lines. Returns ------- list of complete paragraphs, wrapped to fill `ncols` columns.
true
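A quick sketch of `wrap_paragraphs` (hypothetical text; assumes the function and its module-level imports are in scope).

text = """This paragraph is long enough that it will be re-filled to the requested width.

Short second paragraph."""
paragraphs = wrap_paragraphs(text, ncols=40)
# paragraphs is a list of two strings: the first re-wrapped to at most 40 columns,
# the second returned as a single short line.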
2,684,719
def long_substr(data): """Return the longest common substring in a list of strings. Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python """ substr = '' if len(data) > 1 and len(data[0]) > 0: for i in range(len(data[0])): for j in range(len(data[0])-i+1): if j > len(substr) and all(data[0][i:i+j] in x for x in data): substr = data[0][i:i+j] elif len(data) == 1: substr = data[0] return substr
[ "def", "long_substr", "(", "data", ")", ":", "substr", "=", "''", "if", "len", "(", "data", ")", ">", "1", "and", "len", "(", "data", "[", "0", "]", ")", ">", "0", ":", "for", "i", "in", "range", "(", "len", "(", "data", "[", "0", "]", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "data", "[", "0", "]", ")", "-", "i", "+", "1", ")", ":", "if", "j", ">", "len", "(", "substr", ")", "and", "all", "(", "data", "[", "0", "]", "[", "i", ":", "i", "+", "j", "]", "in", "x", "for", "x", "in", "data", ")", ":", "substr", "=", "data", "[", "0", "]", "[", "i", ":", "i", "+", "j", "]", "elif", "len", "(", "data", ")", "==", "1", ":", "substr", "=", "data", "[", "0", "]", "return", "substr" ]
python
Return the longest common substring in a list of strings. Credit: http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
true
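For example (illustrative input, assuming `long_substr` is in scope):

long_substr(['installation', 'installed', 'installer'])
# -> 'install'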
2,684,720
def strip_email_quotes(text): """Strip leading email quotation characters ('>'). Removes any combination of leading '>' interspersed with whitespace that appears *identically* in all lines of the input text. Parameters ---------- text : str Examples -------- Simple uses:: In [2]: strip_email_quotes('> > text') Out[2]: 'text' In [3]: strip_email_quotes('> > text\\n> > more') Out[3]: 'text\\nmore' Note how only the common prefix that appears in all lines is stripped:: In [4]: strip_email_quotes('> > text\\n> > more\\n> more...') Out[4]: '> text\\n> more\\nmore...' So if any line has no quote marks ('>') , then none are stripped from any of them :: In [5]: strip_email_quotes('> > text\\n> > more\\nlast different') Out[5]: '> > text\\n> > more\\nlast different' """ lines = text.splitlines() matches = set() for line in lines: prefix = re.match(r'^(\s*>[ >]*)', line) if prefix: matches.add(prefix.group(1)) else: break else: prefix = long_substr(list(matches)) if prefix: strip = len(prefix) text = '\n'.join([ ln[strip:] for ln in lines]) return text
[ "def", "strip_email_quotes", "(", "text", ")", ":", "lines", "=", "text", ".", "splitlines", "(", ")", "matches", "=", "set", "(", ")", "for", "line", "in", "lines", ":", "prefix", "=", "re", ".", "match", "(", "r'^(\\s*>[ >]*)'", ",", "line", ")", "if", "prefix", ":", "matches", ".", "add", "(", "prefix", ".", "group", "(", "1", ")", ")", "else", ":", "break", "else", ":", "prefix", "=", "long_substr", "(", "list", "(", "matches", ")", ")", "if", "prefix", ":", "strip", "=", "len", "(", "prefix", ")", "text", "=", "'\\n'", ".", "join", "(", "[", "ln", "[", "strip", ":", "]", "for", "ln", "in", "lines", "]", ")", "return", "text" ]
python
Strip leading email quotation characters ('>'). Removes any combination of leading '>' interspersed with whitespace that appears *identically* in all lines of the input text. Parameters ---------- text : str Examples -------- Simple uses:: In [2]: strip_email_quotes('> > text') Out[2]: 'text' In [3]: strip_email_quotes('> > text\\n> > more') Out[3]: 'text\\nmore' Note how only the common prefix that appears in all lines is stripped:: In [4]: strip_email_quotes('> > text\\n> > more\\n> more...') Out[4]: '> text\\n> more\\nmore...' So if any line has no quote marks ('>') , then none are stripped from any of them :: In [5]: strip_email_quotes('> > text\\n> > more\\nlast different') Out[5]: '> > text\\n> > more\\nlast different'
true
2,684,722
def _get_or_default(mylist, i, default=None):
    """return the list item at index i, or default if it does not exist"""
    if i >= len(mylist):
        return default
    else :
        return mylist[i]
[ "def", "_get_or_default", "(", "mylist", ",", "i", ",", "default", "=", "None", ")", ":", "if", "i", ">=", "len", "(", "mylist", ")", ":", "return", "default", "else", ":", "return", "mylist", "[", "i", "]" ]
python
return the list item at index i, or default if it does not exist
true
2,684,724
def columnize(items, separator=' ', displaywidth=80): """ Transform a list of strings into a single string with columns. Parameters ---------- items : sequence of strings The strings to process. separator : str, optional [default is two spaces] The string that separates columns. displaywidth : int, optional [default is 80] Width of the display in number of characters. Returns ------- The formatted string. """ if not items : return '\n' matrix, info = compute_item_matrix(items, separator_size=len(separator), displaywidth=displaywidth) fmatrix = [filter(None, x) for x in matrix] sjoin = lambda x : separator.join([ y.ljust(w, ' ') for y, w in zip(x, info['columns_width'])]) return '\n'.join(map(sjoin, fmatrix))+'\n'
[ "def", "columnize", "(", "items", ",", "separator", "=", "' '", ",", "displaywidth", "=", "80", ")", ":", "if", "not", "items", ":", "return", "'\\n'", "matrix", ",", "info", "=", "compute_item_matrix", "(", "items", ",", "separator_size", "=", "len", "(", "separator", ")", ",", "displaywidth", "=", "displaywidth", ")", "fmatrix", "=", "[", "filter", "(", "None", ",", "x", ")", "for", "x", "in", "matrix", "]", "sjoin", "=", "lambda", "x", ":", "separator", ".", "join", "(", "[", "y", ".", "ljust", "(", "w", ",", "' '", ")", "for", "y", ",", "w", "in", "zip", "(", "x", ",", "info", "[", "'columns_width'", "]", ")", "]", ")", "return", "'\\n'", ".", "join", "(", "map", "(", "sjoin", ",", "fmatrix", ")", ")", "+", "'\\n'" ]
python
Transform a list of strings into a single string with columns. Parameters ---------- items : sequence of strings The strings to process. separator : str, optional [default is two spaces] The string that separates columns. displaywidth : int, optional [default is 80] Width of the display in number of characters. Returns ------- The formatted string.
true
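Example call for `columnize` (illustrative items; assumes `compute_item_matrix` from the same module is available).

names = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta']
print(columnize(names, displaywidth=30))
# Prints the items left-justified in columns, separated by the two-space
# default separator and laid out to fit a 30-character display.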
2,684,805
def read_py_file(filename, skip_encoding_cookie=True): """Read a Python file, using the encoding declared inside the file. Parameters ---------- filename : str The path to the file to read. skip_encoding_cookie : bool If True (the default), and the encoding declaration is found in the first two lines, that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file. """ with open(filename) as f: # the open function defined in this module. if skip_encoding_cookie: return "".join(strip_encoding_cookie(f)) else: return f.read()
[ "def", "read_py_file", "(", "filename", ",", "skip_encoding_cookie", "=", "True", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "if", "skip_encoding_cookie", ":", "return", "\"\"", ".", "join", "(", "strip_encoding_cookie", "(", "f", ")", ")", "else", ":", "return", "f", ".", "read", "(", ")" ]
python
Read a Python file, using the encoding declared inside the file. Parameters ---------- filename : str The path to the file to read. skip_encoding_cookie : bool If True (the default), and the encoding declaration is found in the first two lines, that line will be excluded from the output - compiling a unicode string with an encoding declaration is a SyntaxError in Python 2. Returns ------- A unicode string containing the contents of the file.
true
2,684,878
def Instance2Str(o, d): """ Convert an Instance to a string representation. If the __str__() method produces acceptable output, then you don't need to add the class to conversions; it will be handled by the default converter. If the exact class is not found in d, it will use the first class it can find for which o is an instance. """ if o.__class__ in d: return d[o.__class__](o, d) cl = filter(lambda x,o=o: type(x) is ClassType and isinstance(o, x), d.keys()) if not cl: cl = filter(lambda x,o=o: type(x) is TypeType and isinstance(o, x) and d[x] is not Instance2Str, d.keys()) if not cl: return d[StringType](o,d) d[o.__class__] = d[cl[0]] return d[cl[0]](o, d)
[ "def", "Instance2Str", "(", "o", ",", "d", ")", ":", "if", "o", ".", "__class__", "in", "d", ":", "return", "d", "[", "o", ".", "__class__", "]", "(", "o", ",", "d", ")", "cl", "=", "filter", "(", "lambda", "x", ",", "o", "=", "o", ":", "type", "(", "x", ")", "is", "ClassType", "and", "isinstance", "(", "o", ",", "x", ")", ",", "d", ".", "keys", "(", ")", ")", "if", "not", "cl", ":", "cl", "=", "filter", "(", "lambda", "x", ",", "o", "=", "o", ":", "type", "(", "x", ")", "is", "TypeType", "and", "isinstance", "(", "o", ",", "x", ")", "and", "d", "[", "x", "]", "is", "not", "Instance2Str", ",", "d", ".", "keys", "(", ")", ")", "if", "not", "cl", ":", "return", "d", "[", "StringType", "]", "(", "o", ",", "d", ")", "d", "[", "o", ".", "__class__", "]", "=", "d", "[", "cl", "[", "0", "]", "]", "return", "d", "[", "cl", "[", "0", "]", "]", "(", "o", ",", "d", ")" ]
python
Convert an Instance to a string representation. If the __str__() method produces acceptable output, then you don't need to add the class to conversions; it will be handled by the default converter. If the exact class is not found in d, it will use the first class it can find for which o is an instance.
true
2,684,941
def build_fault(cls, e, tb, include_traceback=False): """ Builds a L{ErrorFault<pyamf.remoting.ErrorFault>} object based on the last exception raised. If include_traceback is C{False} then the traceback will not be added to the L{remoting.ErrorFault}. """ if hasattr(cls, '_amf_code'): code = cls._amf_code else: code = cls.__name__ details = None if include_traceback: details = traceback.format_exception(cls, e, tb) return remoting.ErrorFault(code=code, description=unicode(e), details=details)
[ "def", "build_fault", "(", "cls", ",", "e", ",", "tb", ",", "include_traceback", "=", "False", ")", ":", "if", "hasattr", "(", "cls", ",", "'_amf_code'", ")", ":", "code", "=", "cls", ".", "_amf_code", "else", ":", "code", "=", "cls", ".", "__name__", "details", "=", "None", "if", "include_traceback", ":", "details", "=", "traceback", ".", "format_exception", "(", "cls", ",", "e", ",", "tb", ")", "return", "remoting", ".", "ErrorFault", "(", "code", "=", "code", ",", "description", "=", "unicode", "(", "e", ")", ",", "details", "=", "details", ")" ]
python
Builds a L{ErrorFault<pyamf.remoting.ErrorFault>} object based on the last exception raised. If include_traceback is C{False} then the traceback will not be added to the L{remoting.ErrorFault}.
true
2,685,080
def _seq_pprinter_factory(start, end, basetype): """ Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples, dicts, lists, sets and frozensets. """ def inner(obj, p, cycle): typ = type(obj) if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: return p.text(start + '...' + end) step = len(start) p.begin_group(step, start) for idx, x in enumerate(obj): if idx: p.text(',') p.breakable() p.pretty(x) if len(obj) == 1 and type(obj) is tuple: # Special case for 1-item tuples. p.text(',') p.end_group(step, end) return inner
[ "def", "_seq_pprinter_factory", "(", "start", ",", "end", ",", "basetype", ")", ":", "def", "inner", "(", "obj", ",", "p", ",", "cycle", ")", ":", "typ", "=", "type", "(", "obj", ")", "if", "basetype", "is", "not", "None", "and", "typ", "is", "not", "basetype", "and", "typ", ".", "__repr__", "!=", "basetype", ".", "__repr__", ":", "return", "p", ".", "text", "(", "typ", ".", "__repr__", "(", "obj", ")", ")", "if", "cycle", ":", "return", "p", ".", "text", "(", "start", "+", "'...'", "+", "end", ")", "step", "=", "len", "(", "start", ")", "p", ".", "begin_group", "(", "step", ",", "start", ")", "for", "idx", ",", "x", "in", "enumerate", "(", "obj", ")", ":", "if", "idx", ":", "p", ".", "text", "(", "','", ")", "p", ".", "breakable", "(", ")", "p", ".", "pretty", "(", "x", ")", "if", "len", "(", "obj", ")", "==", "1", "and", "type", "(", "obj", ")", "is", "tuple", ":", "p", ".", "text", "(", "','", ")", "p", ".", "end_group", "(", "step", ",", "end", ")", "return", "inner" ]
python
Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples, dicts, lists, sets and frozensets.
true
2,685,083
def _re_pattern_pprint(obj, p, cycle): """The pprint function for regular expression patterns.""" p.text('re.compile(') pattern = repr(obj.pattern) if pattern[:1] in 'uU': pattern = pattern[1:] prefix = 'ur' else: prefix = 'r' pattern = prefix + pattern.replace('\\\\', '\\') p.text(pattern) if obj.flags: p.text(',') p.breakable() done_one = False for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', 'UNICODE', 'VERBOSE', 'DEBUG'): if obj.flags & getattr(re, flag): if done_one: p.text('|') p.text('re.' + flag) done_one = True p.text(')')
[ "def", "_re_pattern_pprint", "(", "obj", ",", "p", ",", "cycle", ")", ":", "p", ".", "text", "(", "'re.compile('", ")", "pattern", "=", "repr", "(", "obj", ".", "pattern", ")", "if", "pattern", "[", ":", "1", "]", "in", "'uU'", ":", "pattern", "=", "pattern", "[", "1", ":", "]", "prefix", "=", "'ur'", "else", ":", "prefix", "=", "'r'", "pattern", "=", "prefix", "+", "pattern", ".", "replace", "(", "'\\\\\\\\'", ",", "'\\\\'", ")", "p", ".", "text", "(", "pattern", ")", "if", "obj", ".", "flags", ":", "p", ".", "text", "(", "','", ")", "p", ".", "breakable", "(", ")", "done_one", "=", "False", "for", "flag", "in", "(", "'TEMPLATE'", ",", "'IGNORECASE'", ",", "'LOCALE'", ",", "'MULTILINE'", ",", "'DOTALL'", ",", "'UNICODE'", ",", "'VERBOSE'", ",", "'DEBUG'", ")", ":", "if", "obj", ".", "flags", "&", "getattr", "(", "re", ",", "flag", ")", ":", "if", "done_one", ":", "p", ".", "text", "(", "'|'", ")", "p", ".", "text", "(", "'re.'", "+", "flag", ")", "done_one", "=", "True", "p", ".", "text", "(", "')'", ")" ]
python
The pprint function for regular expression patterns.
true
2,685,086
def _exception_pprint(obj, p, cycle): """Base pprint for all exceptions.""" if obj.__class__.__module__ in ('exceptions', 'builtins'): name = obj.__class__.__name__ else: name = '%s.%s' % ( obj.__class__.__module__, obj.__class__.__name__ ) step = len(name) + 1 p.begin_group(step, name + '(') for idx, arg in enumerate(getattr(obj, 'args', ())): if idx: p.text(',') p.breakable() p.pretty(arg) p.end_group(step, ')')
[ "def", "_exception_pprint", "(", "obj", ",", "p", ",", "cycle", ")", ":", "if", "obj", ".", "__class__", ".", "__module__", "in", "(", "'exceptions'", ",", "'builtins'", ")", ":", "name", "=", "obj", ".", "__class__", ".", "__name__", "else", ":", "name", "=", "'%s.%s'", "%", "(", "obj", ".", "__class__", ".", "__module__", ",", "obj", ".", "__class__", ".", "__name__", ")", "step", "=", "len", "(", "name", ")", "+", "1", "p", ".", "begin_group", "(", "step", ",", "name", "+", "'('", ")", "for", "idx", ",", "arg", "in", "enumerate", "(", "getattr", "(", "obj", ",", "'args'", ",", "(", ")", ")", ")", ":", "if", "idx", ":", "p", ".", "text", "(", "','", ")", "p", ".", "breakable", "(", ")", "p", ".", "pretty", "(", "arg", ")", "p", ".", "end_group", "(", "step", ",", "')'", ")" ]
python
Base pprint for all exceptions.
true
2,685,087
def for_type(typ, func): """ Add a pretty printer for a given type. """ oldfunc = _type_pprinters.get(typ, None) if func is not None: # To support easy restoration of old pprinters, we need to ignore Nones. _type_pprinters[typ] = func return oldfunc
[ "def", "for_type", "(", "typ", ",", "func", ")", ":", "oldfunc", "=", "_type_pprinters", ".", "get", "(", "typ", ",", "None", ")", "if", "func", "is", "not", "None", ":", "_type_pprinters", "[", "typ", "]", "=", "func", "return", "oldfunc" ]
python
Add a pretty printer for a given type.
true
2,685,088
def for_type_by_name(type_module, type_name, func): """ Add a pretty printer for a type specified by the module and name of a type rather than the type object itself. """ key = (type_module, type_name) oldfunc = _deferred_type_pprinters.get(key, None) if func is not None: # To support easy restoration of old pprinters, we need to ignore Nones. _deferred_type_pprinters[key] = func return oldfunc
[ "def", "for_type_by_name", "(", "type_module", ",", "type_name", ",", "func", ")", ":", "key", "=", "(", "type_module", ",", "type_name", ")", "oldfunc", "=", "_deferred_type_pprinters", ".", "get", "(", "key", ",", "None", ")", "if", "func", "is", "not", "None", ":", "_deferred_type_pprinters", "[", "key", "]", "=", "func", "return", "oldfunc" ]
python
Add a pretty printer for a type specified by the module and name of a type rather than the type object itself.
true
2,685,230
def parse_field_path(field_path): """ Take a path to a field like "yacms.pages.models.Page.feature_image" and return a model key, which is a tuple of the form ('pages', 'page'), and a field name, e.g. "feature_image". """ model_path, field_name = field_path.rsplit(".", 1) app_name, model_name = model_path.split('.models.') _, app_label = app_name.rsplit('.', 1) return (app_label, model_name.lower()), field_name
[ "def", "parse_field_path", "(", "field_path", ")", ":", "model_path", ",", "field_name", "=", "field_path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "app_name", ",", "model_name", "=", "model_path", ".", "split", "(", "'.models.'", ")", "_", ",", "app_label", "=", "app_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "return", "(", "app_label", ",", "model_name", ".", "lower", "(", ")", ")", ",", "field_name" ]
python
Take a path to a field like "yacms.pages.models.Page.feature_image" and return a model key, which is a tuple of the form ('pages', 'page'), and a field name, e.g. "feature_image".
true
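The transformation described above, as a concrete call:

parse_field_path("yacms.pages.models.Page.feature_image")
# -> (('pages', 'page'), 'feature_image')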
2,685,232
def parse_extra_model_fields(extra_model_fields): """ Parses the value of EXTRA_MODEL_FIELDS, grouping the entries by model and instantiating the extra fields. Returns a sequence of tuples of the form (model_key, fields) where model_key is a pair of app_label, model_name and fields is a list of (field_name, field_instance) pairs. """ fields = defaultdict(list) for entry in extra_model_fields: model_key, field_name = parse_field_path(entry[0]) field_class = import_field(entry[1]) field_args, field_kwargs = entry[2:] try: field = field_class(*field_args, **field_kwargs) except TypeError as e: raise ImproperlyConfigured( "The EXTRA_MODEL_FIELDS setting contains arguments for the " "field '%s' which could not be applied: %s" % (entry[1], e)) fields[model_key].append((field_name, field)) return fields
[ "def", "parse_extra_model_fields", "(", "extra_model_fields", ")", ":", "fields", "=", "defaultdict", "(", "list", ")", "for", "entry", "in", "extra_model_fields", ":", "model_key", ",", "field_name", "=", "parse_field_path", "(", "entry", "[", "0", "]", ")", "field_class", "=", "import_field", "(", "entry", "[", "1", "]", ")", "field_args", ",", "field_kwargs", "=", "entry", "[", "2", ":", "]", "try", ":", "field", "=", "field_class", "(", "*", "field_args", ",", "**", "field_kwargs", ")", "except", "TypeError", "as", "e", ":", "raise", "ImproperlyConfigured", "(", "\"The EXTRA_MODEL_FIELDS setting contains arguments for the \"", "\"field '%s' which could not be applied: %s\"", "%", "(", "entry", "[", "1", "]", ",", "e", ")", ")", "fields", "[", "model_key", "]", ".", "append", "(", "(", "field_name", ",", "field", ")", ")", "return", "fields" ]
python
Parses the value of EXTRA_MODEL_FIELDS, grouping the entries by model and instantiating the extra fields. Returns a mapping from model keys to fields, where each model key is a pair of (app_label, model_name) and fields is a list of (field_name, field_instance) pairs.
true
2,685,233
def add_extra_model_fields(sender, **kwargs): """ Injects custom fields onto the given sender model as defined by the ``EXTRA_MODEL_FIELDS`` setting. This is a closure over the "fields" variable. """ model_key = sender._meta.app_label, sender._meta.model_name for field_name, field in fields.get(model_key, {}): field.contribute_to_class(sender, field_name)
[ "def", "add_extra_model_fields", "(", "sender", ",", "**", "kwargs", ")", ":", "model_key", "=", "sender", ".", "_meta", ".", "app_label", ",", "sender", ".", "_meta", ".", "model_name", "for", "field_name", ",", "field", "in", "fields", ".", "get", "(", "model_key", ",", "{", "}", ")", ":", "field", ".", "contribute_to_class", "(", "sender", ",", "field_name", ")" ]
python
Injects custom fields onto the given sender model as defined by the ``EXTRA_MODEL_FIELDS`` setting. This is a closure over the "fields" variable.
true
2,686,386
def split_addresses(email_string_list): """ Converts a string containing comma separated email addresses into a list of email addresses. """ return [f for f in [s.strip() for s in email_string_list.split(",")] if f]
[ "def", "split_addresses", "(", "email_string_list", ")", ":", "return", "[", "f", "for", "f", "in", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "email_string_list", ".", "split", "(", "\",\"", ")", "]", "if", "f", "]" ]
python
Converts a string containing comma separated email addresses into a list of email addresses.
true
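For instance (illustrative addresses, assuming `split_addresses` is in scope):

split_addresses(" alice@example.com, , bob@example.com ")
# -> ['alice@example.com', 'bob@example.com']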
2,686,387
def subject_template(template, context): """ Loads and renders an email subject template, returning the subject string. """ subject = loader.get_template(template).render(Context(context)) return " ".join(subject.splitlines()).strip()
[ "def", "subject_template", "(", "template", ",", "context", ")", ":", "subject", "=", "loader", ".", "get_template", "(", "template", ")", ".", "render", "(", "Context", "(", "context", ")", ")", "return", "\" \"", ".", "join", "(", "subject", ".", "splitlines", "(", ")", ")", ".", "strip", "(", ")" ]
python
Loads and renders an email subject template, returning the subject string.
true