id_within_dataset (int64) | snippet (string) | tokens (sequence of strings) | language (string) | nl (string) | is_duplicated (bool) |
---|---|---|---|---|---|
2,535,521 | def setup_logger(debug, color):
"""Configure the logger."""
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logger = logging.getLogger('exifread')
stream = Handler(log_level, debug, color)
logger.addHandler(stream)
logger.setLevel(log_level) | [
"def",
"setup_logger",
"(",
"debug",
",",
"color",
")",
":",
"if",
"debug",
":",
"log_level",
"=",
"logging",
".",
"DEBUG",
"else",
":",
"log_level",
"=",
"logging",
".",
"INFO",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"'exifread'",
")",
"stream",
"=",
"Handler",
"(",
"log_level",
",",
"debug",
",",
"color",
")",
"logger",
".",
"addHandler",
"(",
"stream",
")",
"logger",
".",
"setLevel",
"(",
"log_level",
")"
] | python | Configure the logger. | true |
2,536,039 | def make_string(seq):
"""
Don't throw an exception when given an out of range character.
"""
string = ''
for c in seq:
# Screen out non-printing characters
try:
if 32 <= c and c < 256:
string += chr(c)
except TypeError:
pass
# If no printing chars
if not string:
return str(seq)
return string | [
"def",
"make_string",
"(",
"seq",
")",
":",
"string",
"=",
"''",
"for",
"c",
"in",
"seq",
":",
"try",
":",
"if",
"32",
"<=",
"c",
"and",
"c",
"<",
"256",
":",
"string",
"+=",
"chr",
"(",
"c",
")",
"except",
"TypeError",
":",
"pass",
"if",
"not",
"string",
":",
"return",
"str",
"(",
"seq",
")",
"return",
"string"
] | python | Don't throw an exception when given an out of range character. | true |
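
A minimal standalone check of the behaviour described by the `make_string` row above (a condensed copy of the function is included so the example runs on its own; the input values are made up for illustration):

```python
def make_string(seq):
    """Collect printable characters from a byte sequence; fall back to str()."""
    string = ''
    for c in seq:
        try:
            if 32 <= c < 256:
                string += chr(c)
        except TypeError:
            pass
    return string if string else str(seq)

# Printable bytes are kept, the non-printable byte 7 is dropped.
print(make_string([72, 101, 108, 108, 111, 7]))  # -> Hello
# No printable characters at all: the whole sequence is stringified.
print(make_string([1, 2, 3]))                    # -> [1, 2, 3]
```
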
2,536,040 | def s2n_motorola(string):
"""Extract multi-byte integer in Motorola format (little endian)."""
x = 0
for c in string:
x = (x << 8) | ord_(c)
return x | [
"def",
"s2n_motorola",
"(",
"string",
")",
":",
"x",
"=",
"0",
"for",
"c",
"in",
"string",
":",
"x",
"=",
"(",
"x",
"<<",
"8",
")",
"|",
"ord_",
"(",
"c",
")",
"return",
"x"
] | python | Extract multi-byte integer in Motorola format (big endian). | true |
2,536,041 | def s2n_intel(string):
"""Extract multi-byte integer in Intel format (big endian)."""
x = 0
y = 0
for c in string:
x = x | (ord_(c) << y)
y += + 8
return x | [
"def",
"s2n_intel",
"(",
"string",
")",
":",
"x",
"=",
"0",
"y",
"=",
"0",
"for",
"c",
"in",
"string",
":",
"x",
"=",
"x",
"|",
"(",
"ord_",
"(",
"c",
")",
"<<",
"y",
")",
"y",
"+=",
"+",
"8",
"return",
"x"
] | python | Extract multi-byte integer in Intel format (little endian). | true |
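
The two rows above decode multi-byte integers in opposite byte orders: the Motorola loop consumes the most significant byte first (big-endian), while the Intel loop consumes the least significant byte first (little-endian). A small standalone check against `int.from_bytes`, assuming the snippets' `ord_` helper simply returns a byte's integer value:

```python
def s2n_big_endian(data):
    # Same loop as s2n_motorola above: most significant byte first.
    x = 0
    for c in data:
        x = (x << 8) | c
    return x

def s2n_little_endian(data):
    # Same loop as s2n_intel above: least significant byte first.
    x = 0
    for i, c in enumerate(data):
        x |= c << (8 * i)
    return x

data = bytes([0x12, 0x34, 0x56])
assert s2n_big_endian(data) == int.from_bytes(data, 'big') == 0x123456
assert s2n_little_endian(data) == int.from_bytes(data, 'little') == 0x563412
```
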
2,536,434 | def _getcallargs(func, positional, named):
"""Get the mapping of arguments to values.
Generates a dict, with keys being the function argument names
(including the names of the * and ** arguments, if any), and
values the respective bound values from 'positional' and 'named'.
A parameter for the request is injected. Returns a tuple of the
dict, the object the method is being called on, and the name of
the injected request argument.
"""
args, varargs, varkw, defaults = inspect.getargspec(func)
f_name = func.__name__
arg2value = {}
# The following closures are basically because of tuple parameter
# unpacking.
assigned_tuple_params = []
def assign(arg, value):
if isinstance(arg, str):
arg2value[arg] = value
else:
assigned_tuple_params.append(arg)
value = iter(value)
for i, subarg in enumerate(arg):
try:
subvalue = next(value)
except StopIteration:
raise ValueError('need more than %d %s to unpack' %
(i, 'values' if i > 1 else 'value'))
assign(subarg, subvalue)
try:
next(value)
except StopIteration:
pass
else:
raise ValueError('too many values to unpack')
def is_assigned(arg):
if isinstance(arg, str):
return arg in arg2value
return arg in assigned_tuple_params
# Inject a place-holder for the request and get the self and the
# req_name
positional = positional[:1] + (None,) + positional[1:]
theSelf = positional[0]
req_name = args[1]
num_pos = len(positional)
num_total = num_pos + len(named)
num_args = len(args)
num_defaults = len(defaults) if defaults else 0
# Start with our positional parameters...
for arg, value in zip(args, positional):
assign(arg, value)
# Deal with the variable argument list...
if varargs:
if num_pos > num_args:
assign(varargs, positional[-(num_pos-num_args):])
else:
assign(varargs, ())
elif 0 < num_args < num_pos:
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at most' if defaults else 'exactly', num_args,
'arguments' if num_args > 1 else 'argument', num_total))
elif num_args == 0 and num_total:
raise TypeError('%s() takes no arguments (%d given)' %
(f_name, num_total))
# Exclusion rules on keyword arguments
for arg in args:
if isinstance(arg, str) and arg in named:
if is_assigned(arg):
raise TypeError("%s() got multiple values for keyword "
"argument '%s'" % (f_name, arg))
else:
assign(arg, named.pop(arg))
# Fill in any missing values with the defaults
if defaults:
for arg, value in zip(args[-num_defaults:], defaults):
if not is_assigned(arg):
assign(arg, value)
# Handle the **names
if varkw:
assign(varkw, named)
elif named:
unexpected = next(iter(named))
if isinstance(unexpected, unicode):
unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
raise TypeError("%s() got an unexpected keyword argument '%s'" %
(f_name, unexpected))
# Anything left over?
unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
if unassigned:
num_required = num_args - num_defaults
raise TypeError('%s() takes %s %d %s (%d given)' % (
f_name, 'at least' if defaults else 'exactly', num_required,
'arguments' if num_required > 1 else 'argument', num_total))
# Return the mapping and the name of the request argument
return arg2value, theSelf, req_name | [
"def",
"_getcallargs",
"(",
"func",
",",
"positional",
",",
"named",
")",
":",
"args",
",",
"varargs",
",",
"varkw",
",",
"defaults",
"=",
"inspect",
".",
"getargspec",
"(",
"func",
")",
"f_name",
"=",
"func",
".",
"__name__",
"arg2value",
"=",
"{",
"}",
"assigned_tuple_params",
"=",
"[",
"]",
"def",
"assign",
"(",
"arg",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"str",
")",
":",
"arg2value",
"[",
"arg",
"]",
"=",
"value",
"else",
":",
"assigned_tuple_params",
".",
"append",
"(",
"arg",
")",
"value",
"=",
"iter",
"(",
"value",
")",
"for",
"i",
",",
"subarg",
"in",
"enumerate",
"(",
"arg",
")",
":",
"try",
":",
"subvalue",
"=",
"next",
"(",
"value",
")",
"except",
"StopIteration",
":",
"raise",
"ValueError",
"(",
"'need more than %d %s to unpack'",
"%",
"(",
"i",
",",
"'values'",
"if",
"i",
">",
"1",
"else",
"'value'",
")",
")",
"assign",
"(",
"subarg",
",",
"subvalue",
")",
"try",
":",
"next",
"(",
"value",
")",
"except",
"StopIteration",
":",
"pass",
"else",
":",
"raise",
"ValueError",
"(",
"'too many values to unpack'",
")",
"def",
"is_assigned",
"(",
"arg",
")",
":",
"if",
"isinstance",
"(",
"arg",
",",
"str",
")",
":",
"return",
"arg",
"in",
"arg2value",
"return",
"arg",
"in",
"assigned_tuple_params",
"positional",
"=",
"positional",
"[",
":",
"1",
"]",
"+",
"(",
"None",
",",
")",
"+",
"positional",
"[",
"1",
":",
"]",
"theSelf",
"=",
"positional",
"[",
"0",
"]",
"req_name",
"=",
"args",
"[",
"1",
"]",
"num_pos",
"=",
"len",
"(",
"positional",
")",
"num_total",
"=",
"num_pos",
"+",
"len",
"(",
"named",
")",
"num_args",
"=",
"len",
"(",
"args",
")",
"num_defaults",
"=",
"len",
"(",
"defaults",
")",
"if",
"defaults",
"else",
"0",
"for",
"arg",
",",
"value",
"in",
"zip",
"(",
"args",
",",
"positional",
")",
":",
"assign",
"(",
"arg",
",",
"value",
")",
"if",
"varargs",
":",
"if",
"num_pos",
">",
"num_args",
":",
"assign",
"(",
"varargs",
",",
"positional",
"[",
"-",
"(",
"num_pos",
"-",
"num_args",
")",
":",
"]",
")",
"else",
":",
"assign",
"(",
"varargs",
",",
"(",
")",
")",
"elif",
"0",
"<",
"num_args",
"<",
"num_pos",
":",
"raise",
"TypeError",
"(",
"'%s() takes %s %d %s (%d given)'",
"%",
"(",
"f_name",
",",
"'at most'",
"if",
"defaults",
"else",
"'exactly'",
",",
"num_args",
",",
"'arguments'",
"if",
"num_args",
">",
"1",
"else",
"'argument'",
",",
"num_total",
")",
")",
"elif",
"num_args",
"==",
"0",
"and",
"num_total",
":",
"raise",
"TypeError",
"(",
"'%s() takes no arguments (%d given)'",
"%",
"(",
"f_name",
",",
"num_total",
")",
")",
"for",
"arg",
"in",
"args",
":",
"if",
"isinstance",
"(",
"arg",
",",
"str",
")",
"and",
"arg",
"in",
"named",
":",
"if",
"is_assigned",
"(",
"arg",
")",
":",
"raise",
"TypeError",
"(",
"\"%s() got multiple values for keyword \"",
"\"argument '%s'\"",
"%",
"(",
"f_name",
",",
"arg",
")",
")",
"else",
":",
"assign",
"(",
"arg",
",",
"named",
".",
"pop",
"(",
"arg",
")",
")",
"if",
"defaults",
":",
"for",
"arg",
",",
"value",
"in",
"zip",
"(",
"args",
"[",
"-",
"num_defaults",
":",
"]",
",",
"defaults",
")",
":",
"if",
"not",
"is_assigned",
"(",
"arg",
")",
":",
"assign",
"(",
"arg",
",",
"value",
")",
"if",
"varkw",
":",
"assign",
"(",
"varkw",
",",
"named",
")",
"elif",
"named",
":",
"unexpected",
"=",
"next",
"(",
"iter",
"(",
"named",
")",
")",
"if",
"isinstance",
"(",
"unexpected",
",",
"unicode",
")",
":",
"unexpected",
"=",
"unexpected",
".",
"encode",
"(",
"sys",
".",
"getdefaultencoding",
"(",
")",
",",
"'replace'",
")",
"raise",
"TypeError",
"(",
"\"%s() got an unexpected keyword argument '%s'\"",
"%",
"(",
"f_name",
",",
"unexpected",
")",
")",
"unassigned",
"=",
"num_args",
"-",
"len",
"(",
"[",
"arg",
"for",
"arg",
"in",
"args",
"if",
"is_assigned",
"(",
"arg",
")",
"]",
")",
"if",
"unassigned",
":",
"num_required",
"=",
"num_args",
"-",
"num_defaults",
"raise",
"TypeError",
"(",
"'%s() takes %s %d %s (%d given)'",
"%",
"(",
"f_name",
",",
"'at least'",
"if",
"defaults",
"else",
"'exactly'",
",",
"num_required",
",",
"'arguments'",
"if",
"num_required",
">",
"1",
"else",
"'argument'",
",",
"num_total",
")",
")",
"return",
"arg2value",
",",
"theSelf",
",",
"req_name"
] | python | Get the mapping of arguments to values.
Generates a dict, with keys being the function argument names
(including the names of the * and ** arguments, if any), and
values the respective bound values from 'positional' and 'named'.
A parameter for the request is injected. Returns a tuple of the
dict, the object the method is being called on, and the name of
the injected request argument. | true |
2,536,499 | def uuid5(namespace, name):
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
from hashlib import sha1
hash = sha1(namespace.bytes + name).digest()
return UUID(bytes=hash[:16], version=5) | [
"def",
"uuid5",
"(",
"namespace",
",",
"name",
")",
":",
"from",
"hashlib",
"import",
"sha1",
"hash",
"=",
"sha1",
"(",
"namespace",
".",
"bytes",
"+",
"name",
")",
".",
"digest",
"(",
")",
"return",
"UUID",
"(",
"bytes",
"=",
"hash",
"[",
":",
"16",
"]",
",",
"version",
"=",
"5",
")"
] | python | Generate a UUID from the SHA-1 hash of a namespace UUID and a name. | true |
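
The `uuid5` row above mirrors the standard library's name-based UUID generator. For reference, the stdlib version can be used directly (no claim that the snippet's backport is byte-for-byte identical):

```python
import uuid

# Name-based UUID (version 5): SHA-1 of the namespace UUID's bytes plus the name.
u = uuid.uuid5(uuid.NAMESPACE_DNS, 'example.org')
print(u)          # deterministic for a given namespace/name pair
print(u.version)  # -> 5
```
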
2,539,231 | def xmlFile(path, mode='r'):
"""lxml cannot parse XML files starting with a BOM
(see http://www.w3.org/TR/2000/REC-xml-20001006 in F.1.)
In case such XML file is used, we must skip these characters
So we open all XML files for read with 'xmlFile'.
TODO: File this issue to lxml ML or tracker (feature or bug ?)
:param path: The path to the file
    :param mode: Mode for opening the file
"""
fh = file(path, mode)
while fh.read(1) != '<': # Ignoring everything before '<?xml...'
pass
fh.seek(-1, 1)
return fh | [
"def",
"xmlFile",
"(",
"path",
",",
"mode",
"=",
"'r'",
")",
":",
"fh",
"=",
"file",
"(",
"path",
",",
"mode",
")",
"while",
"fh",
".",
"read",
"(",
"1",
")",
"!=",
"'<'",
":",
"pass",
"fh",
".",
"seek",
"(",
"-",
"1",
",",
"1",
")",
"return",
"fh"
] | python | lxml cannot parse XML files starting with a BOM
(see http://www.w3.org/TR/2000/REC-xml-20001006 in F.1.)
In case such XML file is used, we must skip these characters
So we open all XML files for read with 'xmlFile'.
TODO: File this issue to lxml ML or tracker (feature or bug ?)
:param path: The path to the file
:param mode: Mode for opening the file | true |
2,541,175 | def luhn(candidate):
"""
Checks a candidate number for validity according to the Luhn
algorithm (used in validation of, for example, credit cards).
Both numeric and string candidates are accepted.
"""
if not isinstance(candidate, six.string_types):
candidate = str(candidate)
try:
evens = sum(int(c) for c in candidate[-1::-2])
odds = sum(LUHN_ODD_LOOKUP[int(c)] for c in candidate[-2::-2])
return ((evens + odds) % 10 == 0)
except ValueError: # Raised if an int conversion fails
return False | [
"def",
"luhn",
"(",
"candidate",
")",
":",
"if",
"not",
"isinstance",
"(",
"candidate",
",",
"six",
".",
"string_types",
")",
":",
"candidate",
"=",
"str",
"(",
"candidate",
")",
"try",
":",
"evens",
"=",
"sum",
"(",
"int",
"(",
"c",
")",
"for",
"c",
"in",
"candidate",
"[",
"-",
"1",
":",
":",
"-",
"2",
"]",
")",
"odds",
"=",
"sum",
"(",
"LUHN_ODD_LOOKUP",
"[",
"int",
"(",
"c",
")",
"]",
"for",
"c",
"in",
"candidate",
"[",
"-",
"2",
":",
":",
"-",
"2",
"]",
")",
"return",
"(",
"(",
"evens",
"+",
"odds",
")",
"%",
"10",
"==",
"0",
")",
"except",
"ValueError",
":",
"return",
"False"
] | python | Checks a candidate number for validity according to the Luhn
algorithm (used in validation of, for example, credit cards).
Both numeric and string candidates are accepted. | true |
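
A self-contained sketch of the Luhn check in the row above. `LUHN_ODD_LOOKUP` is not shown in the snippet, so the usual digit-doubling table is assumed, and the `six.string_types` check is simplified to a plain `str()` call:

```python
# Assumed lookup table: digit sum of 2*d for each digit d in 0..9.
LUHN_ODD_LOOKUP = (0, 2, 4, 6, 8, 1, 3, 5, 7, 9)

def luhn(candidate):
    candidate = str(candidate)
    try:
        evens = sum(int(c) for c in candidate[-1::-2])
        odds = sum(LUHN_ODD_LOOKUP[int(c)] for c in candidate[-2::-2])
        return (evens + odds) % 10 == 0
    except ValueError:  # raised if an int conversion fails
        return False

print(luhn('4111111111111111'))  # -> True (a classic test card number)
print(luhn('4111111111111112'))  # -> False
print(luhn('not a number'))      # -> False
```
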
2,542,257 | def replace_methodname(format_string, methodname):
"""
Partially format a format_string, swapping out any '{methodname}' or '{methodnamehyphen}' components.
"""
methodnamehyphen = methodname.replace('_', '-')
ret = format_string
ret = ret.replace('{methodname}', methodname)
ret = ret.replace('{methodnamehyphen}', methodnamehyphen)
return ret | [
"def",
"replace_methodname",
"(",
"format_string",
",",
"methodname",
")",
":",
"methodnamehyphen",
"=",
"methodname",
".",
"replace",
"(",
"'_'",
",",
"'-'",
")",
"ret",
"=",
"format_string",
"ret",
"=",
"ret",
".",
"replace",
"(",
"'{methodname}'",
",",
"methodname",
")",
"ret",
"=",
"ret",
".",
"replace",
"(",
"'{methodnamehyphen}'",
",",
"methodnamehyphen",
")",
"return",
"ret"
] | python | Partially format a format_string, swapping out any '{methodname}' or '{methodnamehyphen}' components. | true |
2,542,532 | def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret | [
"def",
"_positive_int",
"(",
"integer_string",
",",
"strict",
"=",
"False",
",",
"cutoff",
"=",
"None",
")",
":",
"ret",
"=",
"int",
"(",
"integer_string",
")",
"if",
"ret",
"<",
"0",
"or",
"(",
"ret",
"==",
"0",
"and",
"strict",
")",
":",
"raise",
"ValueError",
"(",
")",
"if",
"cutoff",
":",
"ret",
"=",
"min",
"(",
"ret",
",",
"cutoff",
")",
"return",
"ret"
] | python | Cast a string to a strictly positive integer. | true |
2,543,187 | def detail_route(methods=None, **kwargs):
"""
Used to mark a method on a ViewSet that should be routed for detail requests.
Usage::
class UserViewSet(ModelCRUDViewSet):
model = User
schema = UserSchema
@detail_route(methods=['post'], url_path='lock-user')
def lock_user(request, id):
...
:param methods: An iterable of strings representing the HTTP (GET, POST, etc.) methods accepted by the route.
:param url_path: Replaces the route automatically generated by the ViewSetRouter for the decorated method
with the value provided.
"""
methods = ['get'] if (methods is None) else methods
def decorator(func):
func.bind_to_methods = methods
func.detail = True
func.kwargs = kwargs
return func
return decorator | [
"def",
"detail_route",
"(",
"methods",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"methods",
"=",
"[",
"'get'",
"]",
"if",
"(",
"methods",
"is",
"None",
")",
"else",
"methods",
"def",
"decorator",
"(",
"func",
")",
":",
"func",
".",
"bind_to_methods",
"=",
"methods",
"func",
".",
"detail",
"=",
"True",
"func",
".",
"kwargs",
"=",
"kwargs",
"return",
"func",
"return",
"decorator"
] | python | Used to mark a method on a ViewSet that should be routed for detail requests.
Usage::
class UserViewSet(ModelCRUDViewSet):
model = User
schema = UserSchema
@detail_route(methods=['post'], url_path='lock-user')
def lock_user(request, id):
...
:param methods: An iterable of strings representing the HTTP (GET, POST, etc.) methods accepted by the route.
:param url_path: Replaces the route automatically generated by the ViewSetRouter for the decorated method
with the value provided. | true |
2,543,535 | def remove_query_param(url, key):
"""
Given a URL and a key/val pair, remove an item in the query
parameters of the URL, and return the new URL.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query)
query_dict.pop(key, None)
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment)) | [
"def",
"remove_query_param",
"(",
"url",
",",
"key",
")",
":",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
"=",
"urlparse",
".",
"urlsplit",
"(",
"url",
")",
"query_dict",
"=",
"urlparse",
".",
"parse_qs",
"(",
"query",
")",
"query_dict",
".",
"pop",
"(",
"key",
",",
"None",
")",
"query",
"=",
"urlparse",
".",
"urlencode",
"(",
"sorted",
"(",
"list",
"(",
"query_dict",
".",
"items",
"(",
")",
")",
")",
",",
"doseq",
"=",
"True",
")",
"return",
"urlparse",
".",
"urlunsplit",
"(",
"(",
"scheme",
",",
"netloc",
",",
"path",
",",
"query",
",",
"fragment",
")",
")"
] | python | Given a URL and a key/val pair, remove an item in the query
parameters of the URL, and return the new URL. | true |
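
The `remove_query_param` row above targets Python 2's `urlparse`/`urllib`. A hedged Python 3 equivalent using `urllib.parse`, keeping the same sort-for-determinism behaviour:

```python
from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

def remove_query_param(url, key):
    scheme, netloc, path, query, fragment = urlsplit(url)
    query_dict = parse_qs(query)
    query_dict.pop(key, None)
    query = urlencode(sorted(query_dict.items()), doseq=True)
    return urlunsplit((scheme, netloc, path, query, fragment))

print(remove_query_param('https://example.org/list?page=3&size=10', 'page'))
# -> https://example.org/list?size=10
```
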
2,548,655 | def base62_encode(cls, num):
"""Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479
"""
alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
if num == 0:
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr) | [
"def",
"base62_encode",
"(",
"cls",
",",
"num",
")",
":",
"alphabet",
"=",
"\"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"",
"if",
"num",
"==",
"0",
":",
"return",
"alphabet",
"[",
"0",
"]",
"arr",
"=",
"[",
"]",
"base",
"=",
"len",
"(",
"alphabet",
")",
"while",
"num",
":",
"rem",
"=",
"num",
"%",
"base",
"num",
"=",
"num",
"//",
"base",
"arr",
".",
"append",
"(",
"alphabet",
"[",
"rem",
"]",
")",
"arr",
".",
"reverse",
"(",
")",
"return",
"''",
".",
"join",
"(",
"arr",
")"
] | python | Encode a number in Base X.
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
Stolen from: http://stackoverflow.com/a/1119769/1144479 | true |
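
A round-trip check for the encoder above, lightly adapted so it runs standalone (the unused `cls` parameter is dropped and the alphabet hoisted to a constant); the decoder is written only for this example and is not part of the row:

```python
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

def base62_encode(num):
    if num == 0:
        return ALPHABET[0]
    arr = []
    base = len(ALPHABET)
    while num:
        rem = num % base
        num = num // base
        arr.append(ALPHABET[rem])
    arr.reverse()
    return ''.join(arr)

def base62_decode(s):
    # Inverse of the encoder, added for the round-trip check only.
    num = 0
    for ch in s:
        num = num * len(ALPHABET) + ALPHABET.index(ch)
    return num

assert base62_encode(0) == '0'
assert base62_encode(61) == 'Z'
assert base62_decode(base62_encode(123456789)) == 123456789
```
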
2,549,011 | def pluginPackagePaths(name):
"""
Return a list of additional directories which should be searched for
modules to be included as part of the named plugin package.
@type name: C{str}
@param name: The fully-qualified Python name of a plugin package, eg
C{'twisted.plugins'}.
@rtype: C{list} of C{str}
@return: The absolute paths to other directories which may contain plugin
modules for the named plugin package.
"""
package = name.split('.')
# Note that this may include directories which do not exist. It may be
# preferable to remove such directories at this point, rather than allow
# them to be searched later on.
#
# Note as well that only '__init__.py' will be considered to make a
# directory a package (and thus exclude it from this list). This means
# that if you create a master plugin package which has some other kind of
# __init__ (eg, __init__.pyc) it will be incorrectly treated as a
# supplementary plugin directory.
return [
os.path.abspath(os.path.join(x, *package))
for x
in sys.path
if
not os.path.exists(os.path.join(x, *package + ['__init__.py']))] | [
"def",
"pluginPackagePaths",
"(",
"name",
")",
":",
"package",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"return",
"[",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
",",
"*",
"package",
")",
")",
"for",
"x",
"in",
"sys",
".",
"path",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"x",
",",
"*",
"package",
"+",
"[",
"'__init__.py'",
"]",
")",
")",
"]"
] | python | Return a list of additional directories which should be searched for
modules to be included as part of the named plugin package.
@type name: C{str}
@param name: The fully-qualified Python name of a plugin package, eg
C{'twisted.plugins'}.
@rtype: C{list} of C{str}
@return: The absolute paths to other directories which may contain plugin
modules for the named plugin package. | true |
2,549,895 | def debug_break(sig, frame):
"""Interrupt running process, and provide a python prompt for interactive
debugging."""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
i = code.InteractiveConsole(d)
message = "Signal recieved : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i.interact(message) | [
"def",
"debug_break",
"(",
"sig",
",",
"frame",
")",
":",
"d",
"=",
"{",
"'_frame'",
":",
"frame",
"}",
"d",
".",
"update",
"(",
"frame",
".",
"f_globals",
")",
"d",
".",
"update",
"(",
"frame",
".",
"f_locals",
")",
"i",
"=",
"code",
".",
"InteractiveConsole",
"(",
"d",
")",
"message",
"=",
"\"Signal recieved : entering python shell.\\nTraceback:\\n\"",
"message",
"+=",
"''",
".",
"join",
"(",
"traceback",
".",
"format_stack",
"(",
"frame",
")",
")",
"i",
".",
"interact",
"(",
"message",
")"
] | python | Interrupt running process, and provide a python prompt for interactive
debugging. | true |
2,550,903 | def _deferGenerator(g, deferred=None):
"""
See L{waitForDeferred}.
"""
result = None
while 1:
if deferred is None:
deferred = defer.Deferred()
try:
result = g.next()
except StopIteration:
deferred.callback(result)
return deferred
except:
deferred.errback()
return deferred
# Deferred.callback(Deferred) raises an error; we catch this case
# early here and give a nicer error message to the user in case
# they yield a Deferred. Perhaps eventually these semantics may
# change.
if isinstance(result, defer.Deferred):
return defer.fail(TypeError("Yield waitForDeferred(d), not d!"))
if isinstance(result, defer.waitForDeferred):
waiting = [True, None]
# Pass vars in so they don't get changed going around the loop
def gotResult(r, waiting=waiting, result=result):
result.result = r
if waiting[0]:
waiting[0] = False
waiting[1] = r
else:
_deferGenerator(g, deferred)
result.d.addBoth(gotResult)
if waiting[0]:
# Haven't called back yet, set flag so that we get reinvoked
# and return from the loop
waiting[0] = False
return deferred
result = None | [
"def",
"_deferGenerator",
"(",
"g",
",",
"deferred",
"=",
"None",
")",
":",
"result",
"=",
"None",
"while",
"1",
":",
"if",
"deferred",
"is",
"None",
":",
"deferred",
"=",
"defer",
".",
"Deferred",
"(",
")",
"try",
":",
"result",
"=",
"g",
".",
"next",
"(",
")",
"except",
"StopIteration",
":",
"deferred",
".",
"callback",
"(",
"result",
")",
"return",
"deferred",
"except",
":",
"deferred",
".",
"errback",
"(",
")",
"return",
"deferred",
"if",
"isinstance",
"(",
"result",
",",
"defer",
".",
"Deferred",
")",
":",
"return",
"defer",
".",
"fail",
"(",
"TypeError",
"(",
"\"Yield waitForDeferred(d), not d!\"",
")",
")",
"if",
"isinstance",
"(",
"result",
",",
"defer",
".",
"waitForDeferred",
")",
":",
"waiting",
"=",
"[",
"True",
",",
"None",
"]",
"def",
"gotResult",
"(",
"r",
",",
"waiting",
"=",
"waiting",
",",
"result",
"=",
"result",
")",
":",
"result",
".",
"result",
"=",
"r",
"if",
"waiting",
"[",
"0",
"]",
":",
"waiting",
"[",
"0",
"]",
"=",
"False",
"waiting",
"[",
"1",
"]",
"=",
"r",
"else",
":",
"_deferGenerator",
"(",
"g",
",",
"deferred",
")",
"result",
".",
"d",
".",
"addBoth",
"(",
"gotResult",
")",
"if",
"waiting",
"[",
"0",
"]",
":",
"waiting",
"[",
"0",
"]",
"=",
"False",
"return",
"deferred",
"result",
"=",
"None"
] | python | See L{waitForDeferred}. | true |
2,551,569 | def truncatechars(value, arg):
"""
Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
return value[:length] + '...'
return value | [
"def",
"truncatechars",
"(",
"value",
",",
"arg",
")",
":",
"try",
":",
"length",
"=",
"int",
"(",
"arg",
")",
"except",
"ValueError",
":",
"return",
"value",
"if",
"len",
"(",
"value",
")",
">",
"length",
":",
"return",
"value",
"[",
":",
"length",
"]",
"+",
"'...'",
"return",
"value"
] | python | Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after. | true |
2,552,533 | def call_unrar(params):
"""Calls rar/unrar command line executable, returns stdout pipe"""
global rar_executable_cached
if rar_executable_cached is None:
for command in ('unrar', 'rar'):
try:
subprocess.Popen([command], stdout=subprocess.PIPE)
rar_executable_cached = command
break
except OSError:
pass
if rar_executable_cached is None:
raise UnpackerNotInstalled("No suitable RAR unpacker installed")
assert type(params) == list, "params must be list"
args = [rar_executable_cached] + params
try:
gc.disable() # See http://bugs.python.org/issue1336
return subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
gc.enable() | [
"def",
"call_unrar",
"(",
"params",
")",
":",
"global",
"rar_executable_cached",
"if",
"rar_executable_cached",
"is",
"None",
":",
"for",
"command",
"in",
"(",
"'unrar'",
",",
"'rar'",
")",
":",
"try",
":",
"subprocess",
".",
"Popen",
"(",
"[",
"command",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"rar_executable_cached",
"=",
"command",
"break",
"except",
"OSError",
":",
"pass",
"if",
"rar_executable_cached",
"is",
"None",
":",
"raise",
"UnpackerNotInstalled",
"(",
"\"No suitable RAR unpacker installed\"",
")",
"assert",
"type",
"(",
"params",
")",
"==",
"list",
",",
"\"params must be list\"",
"args",
"=",
"[",
"rar_executable_cached",
"]",
"+",
"params",
"try",
":",
"gc",
".",
"disable",
"(",
")",
"return",
"subprocess",
".",
"Popen",
"(",
"args",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"finally",
":",
"gc",
".",
"enable",
"(",
")"
] | python | Calls rar/unrar command line executable, returns stdout pipe | true |
2,553,980 | def gzip_encode(data):
"""data -> gzip encoded data
Encode data using the gzip content encoding as described in RFC 1952
"""
if not gzip:
raise NotImplementedError
f = StringIO.StringIO()
gzf = gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1)
gzf.write(data)
gzf.close()
encoded = f.getvalue()
f.close()
return encoded | [
"def",
"gzip_encode",
"(",
"data",
")",
":",
"if",
"not",
"gzip",
":",
"raise",
"NotImplementedError",
"f",
"=",
"StringIO",
".",
"StringIO",
"(",
")",
"gzf",
"=",
"gzip",
".",
"GzipFile",
"(",
"mode",
"=",
"\"wb\"",
",",
"fileobj",
"=",
"f",
",",
"compresslevel",
"=",
"1",
")",
"gzf",
".",
"write",
"(",
"data",
")",
"gzf",
".",
"close",
"(",
")",
"encoded",
"=",
"f",
".",
"getvalue",
"(",
")",
"f",
".",
"close",
"(",
")",
"return",
"encoded"
] | python | data -> gzip encoded data
Encode data using the gzip content encoding as described in RFC 1952 | true |
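
The `gzip_encode` row above uses Python 2's `StringIO`. Under Python 3 the same content encoding can be produced with `io.BytesIO` (or simply `gzip.compress`); a minimal sketch with a round-trip check, using an arbitrary payload:

```python
import gzip
import io

def gzip_encode(data: bytes) -> bytes:
    buf = io.BytesIO()
    # GzipFile does not close the underlying buffer, so getvalue() stays valid.
    with gzip.GzipFile(mode="wb", fileobj=buf, compresslevel=1) as gzf:
        gzf.write(data)
    return buf.getvalue()

payload = b"<methodCall>...</methodCall>"
encoded = gzip_encode(payload)
assert gzip.decompress(encoded) == payload
```
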
2,554,052 | def pvariance(data, mu=None):
"""Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported:
"""
if iter(data) is data:
data = list(data)
n = len(data)
if n < 1:
raise StatisticsError('pvariance requires at least one data point')
ss = _ss(data, mu)
return ss / n | [
"def",
"pvariance",
"(",
"data",
",",
"mu",
"=",
"None",
")",
":",
"if",
"iter",
"(",
"data",
")",
"is",
"data",
":",
"data",
"=",
"list",
"(",
"data",
")",
"n",
"=",
"len",
"(",
"data",
")",
"if",
"n",
"<",
"1",
":",
"raise",
"StatisticsError",
"(",
"'pvariance requires at least one data point'",
")",
"ss",
"=",
"_ss",
"(",
"data",
",",
"mu",
")",
"return",
"ss",
"/",
"n"
] | python | Return the population variance of ``data``.
data should be an iterable of Real-valued numbers, with at least one
value. The optional argument mu, if given, should be the mean of
the data. If it is missing or None, the mean is automatically calculated.
Use this function to calculate the variance from the entire population.
To estimate the variance from a sample, the ``variance`` function is
usually a better choice.
If you have already calculated the mean of the data, you can pass it as
the optional second argument to avoid recalculating it:
This function does not check that ``mu`` is actually the mean of ``data``.
Giving arbitrary values for ``mu`` may lead to invalid or impossible
results.
Decimals and Fractions are supported: | true |
2,554,054 | def pstdev(data, mu=None):
"""Return the square root of the population variance.
See ``pvariance`` for arguments and other details.
"""
var = pvariance(data, mu)
try:
return var.sqrt()
except AttributeError:
return math.sqrt(var) | [
"def",
"pstdev",
"(",
"data",
",",
"mu",
"=",
"None",
")",
":",
"var",
"=",
"pvariance",
"(",
"data",
",",
"mu",
")",
"try",
":",
"return",
"var",
".",
"sqrt",
"(",
")",
"except",
"AttributeError",
":",
"return",
"math",
".",
"sqrt",
"(",
"var",
")"
] | python | Return the square root of the population variance.
See ``pvariance`` for arguments and other details. | true |
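
The `pvariance` and `pstdev` rows above mirror (or backport) the standard library's `statistics` module; a quick check against the stdlib versions, available since Python 3.4:

```python
import math
import statistics

data = [1.0, 2.0, 4.0, 7.0]
mu = statistics.mean(data)            # 3.5
var = statistics.pvariance(data, mu)  # population variance
sd = statistics.pstdev(data, mu)

assert math.isclose(var, 5.25)
assert math.isclose(sd, math.sqrt(5.25))
```
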
2,557,238 | def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params={}):
"""
Checks parameters for pairwise alignment, returns alignment.
Code from Greg Caporaso.
"""
seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False)
try:
s1, s2 = seqs.values()
except ValueError:
raise ValueError(
"Pairwise aligning of seqs requires exactly two seqs.")
try:
gap_open = params['gap_open']
except KeyError:
gap_open = 5
try:
gap_extend = params['gap_extend']
except KeyError:
gap_extend = 2
try:
score_matrix = params['score_matrix']
except KeyError:
score_matrix = make_dna_scoring_dict(
match=1, transition=-1, transversion=-1)
return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend) | [
"def",
"pair_hmm_align_unaligned_seqs",
"(",
"seqs",
",",
"moltype",
"=",
"DNA_cogent",
",",
"params",
"=",
"{",
"}",
")",
":",
"seqs",
"=",
"LoadSeqs",
"(",
"data",
"=",
"seqs",
",",
"moltype",
"=",
"moltype",
",",
"aligned",
"=",
"False",
")",
"try",
":",
"s1",
",",
"s2",
"=",
"seqs",
".",
"values",
"(",
")",
"except",
"ValueError",
":",
"raise",
"ValueError",
"(",
"\"Pairwise aligning of seqs requires exactly two seqs.\"",
")",
"try",
":",
"gap_open",
"=",
"params",
"[",
"'gap_open'",
"]",
"except",
"KeyError",
":",
"gap_open",
"=",
"5",
"try",
":",
"gap_extend",
"=",
"params",
"[",
"'gap_extend'",
"]",
"except",
"KeyError",
":",
"gap_extend",
"=",
"2",
"try",
":",
"score_matrix",
"=",
"params",
"[",
"'score_matrix'",
"]",
"except",
"KeyError",
":",
"score_matrix",
"=",
"make_dna_scoring_dict",
"(",
"match",
"=",
"1",
",",
"transition",
"=",
"-",
"1",
",",
"transversion",
"=",
"-",
"1",
")",
"return",
"local_pairwise",
"(",
"s1",
",",
"s2",
",",
"score_matrix",
",",
"gap_open",
",",
"gap_extend",
")"
] | python | Checks parameters for pairwise alignment, returns alignment.
Code from Greg Caporaso. | true |
2,559,509 | def _argsdicts( args, mydict ):
"""A utility generator that pads argument list and dictionary values, will only be called with len( args ) = 0, 1."""
if len( args ) == 0:
args = None,
elif len( args ) == 1:
args = _totuple( args[0] )
else:
raise Exception( "We should have never gotten here." )
mykeys = list( mydict.keys( ) )
myvalues = list( map( _totuple, list( mydict.values( ) ) ) )
maxlength = max( list( map( len, [ args ] + myvalues ) ) )
for i in range( maxlength ):
thisdict = { }
for key, value in zip( mykeys, myvalues ):
try:
thisdict[ key ] = value[i]
except IndexError:
thisdict[ key ] = value[-1]
try:
thisarg = args[i]
except IndexError:
thisarg = args[-1]
yield thisarg, thisdict | [
"def",
"_argsdicts",
"(",
"args",
",",
"mydict",
")",
":",
"if",
"len",
"(",
"args",
")",
"==",
"0",
":",
"args",
"=",
"None",
",",
"elif",
"len",
"(",
"args",
")",
"==",
"1",
":",
"args",
"=",
"_totuple",
"(",
"args",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"Exception",
"(",
"\"We should have never gotten here.\"",
")",
"mykeys",
"=",
"list",
"(",
"mydict",
".",
"keys",
"(",
")",
")",
"myvalues",
"=",
"list",
"(",
"map",
"(",
"_totuple",
",",
"list",
"(",
"mydict",
".",
"values",
"(",
")",
")",
")",
")",
"maxlength",
"=",
"max",
"(",
"list",
"(",
"map",
"(",
"len",
",",
"[",
"args",
"]",
"+",
"myvalues",
")",
")",
")",
"for",
"i",
"in",
"range",
"(",
"maxlength",
")",
":",
"thisdict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"zip",
"(",
"mykeys",
",",
"myvalues",
")",
":",
"try",
":",
"thisdict",
"[",
"key",
"]",
"=",
"value",
"[",
"i",
"]",
"except",
"IndexError",
":",
"thisdict",
"[",
"key",
"]",
"=",
"value",
"[",
"-",
"1",
"]",
"try",
":",
"thisarg",
"=",
"args",
"[",
"i",
"]",
"except",
"IndexError",
":",
"thisarg",
"=",
"args",
"[",
"-",
"1",
"]",
"yield",
"thisarg",
",",
"thisdict"
] | python | A utility generator that pads argument list and dictionary values, will only be called with len( args ) = 0, 1. | true |
2,559,510 | def _totuple( x ):
"""Utility stuff to convert string, int, long, float, None or anything to a usable tuple."""
if isinstance( x, basestring ):
out = x,
elif isinstance( x, ( int, long, float ) ):
out = str( x ),
elif x is None:
out = None,
else:
out = tuple( x )
return out | [
"def",
"_totuple",
"(",
"x",
")",
":",
"if",
"isinstance",
"(",
"x",
",",
"basestring",
")",
":",
"out",
"=",
"x",
",",
"elif",
"isinstance",
"(",
"x",
",",
"(",
"int",
",",
"long",
",",
"float",
")",
")",
":",
"out",
"=",
"str",
"(",
"x",
")",
",",
"elif",
"x",
"is",
"None",
":",
"out",
"=",
"None",
",",
"else",
":",
"out",
"=",
"tuple",
"(",
"x",
")",
"return",
"out"
] | python | Utility stuff to convert string, int, long, float, None or anything to a usable tuple. | true |
2,559,831 | def coerce_put_post(request):
"""
Django doesn't particularly understand REST.
In case we send data over PUT, Django won't
actually look at the data and load it. We need
to twist its arm here.
The try/except abominiation here is due to a bug
in mod_python. This should fix it.
"""
if request.method == "PUT":
# Bug fix: if _load_post_and_files has already been called, for
# example by middleware accessing request.POST, the below code to
# pretend the request is a POST instead of a PUT will be too late
# to make a difference. Also calling _load_post_and_files will result
# in the following exception:
# AttributeError: You cannot set the upload handlers after the upload has been processed.
# The fix is to check for the presence of the _post field which is set
# the first time _load_post_and_files is called (both by wsgi.py and
# modpython.py). If it's set, the request has to be 'reset' to redo
# the query value parsing in POST mode.
if hasattr(request, '_post'):
del request._post
del request._files
try:
request.method = "POST"
request._load_post_and_files()
request.method = "PUT"
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = 'PUT'
request.PUT = request.POST | [
"def",
"coerce_put_post",
"(",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"\"PUT\"",
":",
"if",
"hasattr",
"(",
"request",
",",
"'_post'",
")",
":",
"del",
"request",
".",
"_post",
"del",
"request",
".",
"_files",
"try",
":",
"request",
".",
"method",
"=",
"\"POST\"",
"request",
".",
"_load_post_and_files",
"(",
")",
"request",
".",
"method",
"=",
"\"PUT\"",
"except",
"AttributeError",
":",
"request",
".",
"META",
"[",
"'REQUEST_METHOD'",
"]",
"=",
"'POST'",
"request",
".",
"_load_post_and_files",
"(",
")",
"request",
".",
"META",
"[",
"'REQUEST_METHOD'",
"]",
"=",
"'PUT'",
"request",
".",
"PUT",
"=",
"request",
".",
"POST"
] | python | Django doesn't particularly understand REST.
In case we send data over PUT, Django won't
actually look at the data and load it. We need
to twist its arm here.
The try/except abomination here is due to a bug
in mod_python. This should fix it. | true |
2,561,991 | def absolutify(url):
"""Takes a URL and prepends the SITE_URL"""
site_url = getattr(settings, 'SITE_URL', False)
# If we don't define it explicitly
if not site_url:
protocol = settings.PROTOCOL
hostname = settings.DOMAIN
port = settings.PORT
if (protocol, port) in (('https://', 443), ('http://', 80)):
site_url = ''.join(map(str, (protocol, hostname)))
else:
site_url = ''.join(map(str, (protocol, hostname, ':', port)))
return site_url + url | [
"def",
"absolutify",
"(",
"url",
")",
":",
"site_url",
"=",
"getattr",
"(",
"settings",
",",
"'SITE_URL'",
",",
"False",
")",
"if",
"not",
"site_url",
":",
"protocol",
"=",
"settings",
".",
"PROTOCOL",
"hostname",
"=",
"settings",
".",
"DOMAIN",
"port",
"=",
"settings",
".",
"PORT",
"if",
"(",
"protocol",
",",
"port",
")",
"in",
"(",
"(",
"'https://'",
",",
"443",
")",
",",
"(",
"'http://'",
",",
"80",
")",
")",
":",
"site_url",
"=",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"(",
"protocol",
",",
"hostname",
")",
")",
")",
"else",
":",
"site_url",
"=",
"''",
".",
"join",
"(",
"map",
"(",
"str",
",",
"(",
"protocol",
",",
"hostname",
",",
"':'",
",",
"port",
")",
")",
")",
"return",
"site_url",
"+",
"url"
] | python | Takes a URL and prepends the SITE_URL | true |
2,563,005 | def _urlencode(items):
"""A Unicode-safe URLencoder."""
try:
return urllib.urlencode(items)
except UnicodeEncodeError:
return urllib.urlencode([(k, smart_str(v)) for k, v in items]) | [
"def",
"_urlencode",
"(",
"items",
")",
":",
"try",
":",
"return",
"urllib",
".",
"urlencode",
"(",
"items",
")",
"except",
"UnicodeEncodeError",
":",
"return",
"urllib",
".",
"urlencode",
"(",
"[",
"(",
"k",
",",
"smart_str",
"(",
"v",
")",
")",
"for",
"k",
",",
"v",
"in",
"items",
"]",
")"
] | python | A Unicode-safe URLencoder. | true |
2,567,026 | def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words | [
"def",
"load_stop_words",
"(",
"stop_word_file",
")",
":",
"stop_words",
"=",
"[",
"]",
"for",
"line",
"in",
"open",
"(",
"stop_word_file",
")",
":",
"if",
"line",
".",
"strip",
"(",
")",
"[",
"0",
":",
"1",
"]",
"!=",
"\"#\"",
":",
"for",
"word",
"in",
"line",
".",
"split",
"(",
")",
":",
"stop_words",
".",
"append",
"(",
"word",
")",
"return",
"stop_words"
] | python | Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words. | true |
2,567,027 | def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
    @param text The text that must be split into words.
    @param min_word_return_size The minimum number of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
#leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words | [
"def",
"separate_words",
"(",
"text",
",",
"min_word_return_size",
")",
":",
"splitter",
"=",
"re",
".",
"compile",
"(",
"'[^a-zA-Z0-9_\\\\+\\\\-/]'",
")",
"words",
"=",
"[",
"]",
"for",
"single_word",
"in",
"splitter",
".",
"split",
"(",
"text",
")",
":",
"current_word",
"=",
"single_word",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"if",
"len",
"(",
"current_word",
")",
">",
"min_word_return_size",
"and",
"current_word",
"!=",
"''",
"and",
"not",
"is_number",
"(",
"current_word",
")",
":",
"words",
".",
"append",
"(",
"current_word",
")",
"return",
"words"
] | python | Utility function to return a list of all words that have a length greater than a specified number of characters.
@param text The text that must be split into words.
@param min_word_return_size The minimum number of characters a word must have to be included. | true |
2,576,320 | def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info) | [
"def",
"get_requirement_from_url",
"(",
"url",
")",
":",
"link",
"=",
"Link",
"(",
"url",
")",
"egg_info",
"=",
"link",
".",
"egg_fragment",
"if",
"not",
"egg_info",
":",
"egg_info",
"=",
"splitext",
"(",
"link",
".",
"filename",
")",
"[",
"0",
"]",
"return",
"package_to_requirement",
"(",
"egg_info",
")"
] | python | Get a requirement from the URL, if possible. This looks for #egg
in the URL | true |
2,576,321 | def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name | [
"def",
"package_to_requirement",
"(",
"package_name",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'^(.*?)-(dev|\\d.*)'",
",",
"package_name",
")",
"if",
"match",
":",
"name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"version",
"=",
"match",
".",
"group",
"(",
"2",
")",
"else",
":",
"name",
"=",
"package_name",
"version",
"=",
"''",
"if",
"version",
":",
"return",
"'%s==%s'",
"%",
"(",
"name",
",",
"version",
")",
"else",
":",
"return",
"name"
] | python | Translate a name like Foo-1.2 to Foo==1.2 | true |
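
A quick check of the translation performed by the row above (the function is copied in lightly condensed form so the example runs standalone):

```python
import re

def package_to_requirement(package_name):
    match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
    if match:
        name, version = match.group(1), match.group(2)
    else:
        name, version = package_name, ''
    return '%s==%s' % (name, version) if version else name

print(package_to_requirement('Foo-1.2'))  # -> Foo==1.2
print(package_to_requirement('Foo-dev'))  # -> Foo==dev
print(package_to_requirement('Foo'))      # -> Foo
```
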
2,576,322 | def get_mirrors(hostname=None):
"""Return the list of mirrors from the last record found on the DNS
entry::
>>> from pip.index import get_mirrors
>>> get_mirrors()
['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
'd.pypi.python.org']
Originally written for the distutils2 project by Alexis Metaireau.
"""
if hostname is None:
hostname = DEFAULT_MIRROR_URL
# return the last mirror registered on PyPI.
try:
hostname = socket.gethostbyname_ex(hostname)[0]
except socket.gaierror:
return []
end_letter = hostname.split(".", 1)
# determine the list from the last one.
return ["%s.%s" % (s, end_letter[1]) for s in string_range(end_letter[0])] | [
"def",
"get_mirrors",
"(",
"hostname",
"=",
"None",
")",
":",
"if",
"hostname",
"is",
"None",
":",
"hostname",
"=",
"DEFAULT_MIRROR_URL",
"try",
":",
"hostname",
"=",
"socket",
".",
"gethostbyname_ex",
"(",
"hostname",
")",
"[",
"0",
"]",
"except",
"socket",
".",
"gaierror",
":",
"return",
"[",
"]",
"end_letter",
"=",
"hostname",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
"return",
"[",
"\"%s.%s\"",
"%",
"(",
"s",
",",
"end_letter",
"[",
"1",
"]",
")",
"for",
"s",
"in",
"string_range",
"(",
"end_letter",
"[",
"0",
"]",
")",
"]"
] | python | Return the list of mirrors from the last record found on the DNS
entry::
>>> from pip.index import get_mirrors
>>> get_mirrors()
['a.pypi.python.org', 'b.pypi.python.org', 'c.pypi.python.org',
'd.pypi.python.org']
Originally written for the distutils2 project by Alexis Metaireau. | true |
2,576,323 | def string_range(last):
"""Compute the range of string between "a" and last.
This works for simple "a to z" lists, but also for "a to zz" lists.
"""
for k in range(len(last)):
for x in product(string.ascii_lowercase, repeat=k+1):
result = ''.join(x)
yield result
if result == last:
return | [
"def",
"string_range",
"(",
"last",
")",
":",
"for",
"k",
"in",
"range",
"(",
"len",
"(",
"last",
")",
")",
":",
"for",
"x",
"in",
"product",
"(",
"string",
".",
"ascii_lowercase",
",",
"repeat",
"=",
"k",
"+",
"1",
")",
":",
"result",
"=",
"''",
".",
"join",
"(",
"x",
")",
"yield",
"result",
"if",
"result",
"==",
"last",
":",
"return"
] | python | Compute the range of string between "a" and last.
This works for simple "a to z" lists, but also for "a to zz" lists. | true |
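
The generator above walks 'a', 'b', ..., 'z', 'aa', 'ab', ... until it has emitted `last`; a short standalone demo (generator copied verbatim, plus the imports it needs):

```python
import string
from itertools import product

def string_range(last):
    for k in range(len(last)):
        for x in product(string.ascii_lowercase, repeat=k + 1):
            result = ''.join(x)
            yield result
            if result == last:
                return

print(list(string_range('d')))        # -> ['a', 'b', 'c', 'd']
print(len(list(string_range('ab'))))  # 26 single letters + 'aa' + 'ab' = 28
```
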
2,576,958 | def open_logfile(filename, mode='a'):
"""Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity.
"""
filename = os.path.expanduser(filename)
filename = os.path.abspath(filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
exists = os.path.exists(filename)
log_fp = open(filename, mode)
if exists:
log_fp.write('%s\n' % ('-'*60))
log_fp.write('%s run on %s\n' % (sys.argv[0], time.strftime('%c')))
return log_fp | [
"def",
"open_logfile",
"(",
"filename",
",",
"mode",
"=",
"'a'",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"filename",
")",
"filename",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"filename",
")",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")",
"exists",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
"log_fp",
"=",
"open",
"(",
"filename",
",",
"mode",
")",
"if",
"exists",
":",
"log_fp",
".",
"write",
"(",
"'%s\\n'",
"%",
"(",
"'-'",
"*",
"60",
")",
")",
"log_fp",
".",
"write",
"(",
"'%s run on %s\\n'",
"%",
"(",
"sys",
".",
"argv",
"[",
"0",
"]",
",",
"time",
".",
"strftime",
"(",
"'%c'",
")",
")",
")",
"return",
"log_fp"
] | python | Open the named log file in append mode.
If the file already exists, a separator will also be printed to
the file to separate past activity from current activity. | true |
2,577,573 | def tzname_in_python2(myfunc):
"""Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings
"""
def inner_func(*args, **kwargs):
if PY3:
return myfunc(*args, **kwargs)
else:
return myfunc(*args, **kwargs).encode()
return inner_func | [
"def",
"tzname_in_python2",
"(",
"myfunc",
")",
":",
"def",
"inner_func",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"PY3",
":",
"return",
"myfunc",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"else",
":",
"return",
"myfunc",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
".",
"encode",
"(",
")",
"return",
"inner_func"
] | python | Change unicode output into bytestrings in Python 2
tzname() API changed in Python 3. It used to return bytes, but was changed
to unicode strings | true |
2,578,110 | def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path | [
"def",
"display_path",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"if",
"path",
".",
"startswith",
"(",
"os",
".",
"getcwd",
"(",
")",
"+",
"os",
".",
"path",
".",
"sep",
")",
":",
"path",
"=",
"'.'",
"+",
"path",
"[",
"len",
"(",
"os",
".",
"getcwd",
"(",
")",
")",
":",
"]",
"return",
"path"
] | python | Gives the display value for a given path, making it relative to cwd
if possible. | true |
2,578,550 | def strip_encoding_cookie(filelike):
"""Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines.
"""
it = iter(filelike)
try:
first = next(it)
if not cookie_comment_re.match(first):
yield first
second = next(it)
if not cookie_comment_re.match(second):
yield second
except StopIteration:
return
for line in it:
yield line | [
"def",
"strip_encoding_cookie",
"(",
"filelike",
")",
":",
"it",
"=",
"iter",
"(",
"filelike",
")",
"try",
":",
"first",
"=",
"next",
"(",
"it",
")",
"if",
"not",
"cookie_comment_re",
".",
"match",
"(",
"first",
")",
":",
"yield",
"first",
"second",
"=",
"next",
"(",
"it",
")",
"if",
"not",
"cookie_comment_re",
".",
"match",
"(",
"second",
")",
":",
"yield",
"second",
"except",
"StopIteration",
":",
"return",
"for",
"line",
"in",
"it",
":",
"yield",
"line"
] | python | Generator to pull lines from a text-mode file, skipping the encoding
cookie if it is found in the first two lines. | true |
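
`cookie_comment_re` is not shown in the row above, so this sketch assumes the usual PEP 263 coding-cookie pattern and feeds a small list of lines in place of a file object:

```python
import re

# Assumed PEP 263 cookie pattern; the snippet's cookie_comment_re is not shown.
cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)")

def strip_encoding_cookie(filelike):
    it = iter(filelike)
    try:
        first = next(it)
        if not cookie_comment_re.match(first):
            yield first
        second = next(it)
        if not cookie_comment_re.match(second):
            yield second
    except StopIteration:
        return
    for line in it:
        yield line

lines = [
    "#!/usr/bin/env python\n",
    "# -*- coding: utf-8 -*-\n",
    "print('hello')\n",
]
print(list(strip_encoding_cookie(lines)))
# -> ["#!/usr/bin/env python\n", "print('hello')\n"]
```
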
2,578,660 | def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute.
"""
path = os.path.normcase(os.path.abspath(path))
if _drive_re.match(path):
path = path[0] + '|' + path[2:]
url = urllib.quote(path)
url = url.replace(os.path.sep, '/')
url = url.lstrip('/')
return 'file:///' + url | [
"def",
"path_to_url",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"normcase",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"if",
"_drive_re",
".",
"match",
"(",
"path",
")",
":",
"path",
"=",
"path",
"[",
"0",
"]",
"+",
"'|'",
"+",
"path",
"[",
"2",
":",
"]",
"url",
"=",
"urllib",
".",
"quote",
"(",
"path",
")",
"url",
"=",
"url",
".",
"replace",
"(",
"os",
".",
"path",
".",
"sep",
",",
"'/'",
")",
"url",
"=",
"url",
".",
"lstrip",
"(",
"'/'",
")",
"return",
"'file:///'",
"+",
"url"
] | python | Convert a path to a file: URL. The path will be made absolute. | true |
2,578,661 | def path_to_url2(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url | [
"def",
"path_to_url2",
"(",
"path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"path",
")",
")",
"drive",
",",
"path",
"=",
"os",
".",
"path",
".",
"splitdrive",
"(",
"path",
")",
"filepath",
"=",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"sep",
")",
"url",
"=",
"'/'",
".",
"join",
"(",
"[",
"urllib",
".",
"quote",
"(",
"part",
")",
"for",
"part",
"in",
"filepath",
"]",
")",
"if",
"not",
"drive",
":",
"url",
"=",
"url",
".",
"lstrip",
"(",
"'/'",
")",
"return",
"'file:///'",
"+",
"drive",
"+",
"url"
] | python | Convert a path to a file: URL. The path will be made absolute and have
quoted path parts. | true |
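
The two rows above hand-roll file: URLs for Python 2. On Python 3 the standard library already provides this via `pathlib.Path.as_uri()`, shown here for comparison (the exact URL depends on the platform and current directory):

```python
from pathlib import Path

p = Path("some dir/report.txt").resolve()  # as_uri() requires an absolute path
print(p.as_uri())
# e.g. file:///home/user/some%20dir/report.txt  (platform- and cwd-dependent)
```
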
2,579,590 | def _find_spec_from_path(name, path=None):
"""Return the spec for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__spec__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable spec with the
value of 'path' given to the finders. None is returned if no spec could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct spec.
"""
if name not in sys.modules:
return _find_spec(name, path)
else:
module = sys.modules[name]
if module is None:
return None
try:
spec = module.__spec__
except AttributeError:
six.raise_from(ValueError('{}.__spec__ is not set'.format(name)), None)
else:
if spec is None:
raise ValueError('{}.__spec__ is None'.format(name))
return spec | [
"def",
"_find_spec_from_path",
"(",
"name",
",",
"path",
"=",
"None",
")",
":",
"if",
"name",
"not",
"in",
"sys",
".",
"modules",
":",
"return",
"_find_spec",
"(",
"name",
",",
"path",
")",
"else",
":",
"module",
"=",
"sys",
".",
"modules",
"[",
"name",
"]",
"if",
"module",
"is",
"None",
":",
"return",
"None",
"try",
":",
"spec",
"=",
"module",
".",
"__spec__",
"except",
"AttributeError",
":",
"six",
".",
"raise_from",
"(",
"ValueError",
"(",
"'{}.__spec__ is not set'",
".",
"format",
"(",
"name",
")",
")",
",",
"None",
")",
"else",
":",
"if",
"spec",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'{}.__spec__ is None'",
".",
"format",
"(",
"name",
")",
")",
"return",
"spec"
] | python | Return the spec for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__spec__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable spec with the
value of 'path' given to the finders. None is returned if no spec could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct spec. | true |
2,579,591 | def find_spec(name, package=None):
"""Return the spec for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__spec__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable spec with the
value of 'path' given to the finders. None is returned if no spec could
be found.
If the name is for submodule (contains a dot), the parent module is
automatically imported.
The name and package arguments work the same as importlib.import_module().
In other words, relative module names (with leading dots) work.
"""
fullname = resolve_name(name, package) if name.startswith('.') else name
if fullname not in sys.modules:
parent_name = fullname.rpartition('.')[0]
if parent_name:
# Use builtins.__import__() in case someone replaced it.
parent = __import__(parent_name, fromlist=['__path__'])
return _find_spec(fullname, parent.__path__)
else:
return _find_spec(fullname, None)
else:
module = sys.modules[fullname]
if module is None:
return None
try:
spec = module.__spec__
except AttributeError:
six.raise_from(ValueError('{}.__spec__ is not set'.format(name)), None)
else:
if spec is None:
raise ValueError('{}.__spec__ is None'.format(name))
return spec | [
"def",
"find_spec",
"(",
"name",
",",
"package",
"=",
"None",
")",
":",
"fullname",
"=",
"resolve_name",
"(",
"name",
",",
"package",
")",
"if",
"name",
".",
"startswith",
"(",
"'.'",
")",
"else",
"name",
"if",
"fullname",
"not",
"in",
"sys",
".",
"modules",
":",
"parent_name",
"=",
"fullname",
".",
"rpartition",
"(",
"'.'",
")",
"[",
"0",
"]",
"if",
"parent_name",
":",
"parent",
"=",
"__import__",
"(",
"parent_name",
",",
"fromlist",
"=",
"[",
"'__path__'",
"]",
")",
"return",
"_find_spec",
"(",
"fullname",
",",
"parent",
".",
"__path__",
")",
"else",
":",
"return",
"_find_spec",
"(",
"fullname",
",",
"None",
")",
"else",
":",
"module",
"=",
"sys",
".",
"modules",
"[",
"fullname",
"]",
"if",
"module",
"is",
"None",
":",
"return",
"None",
"try",
":",
"spec",
"=",
"module",
".",
"__spec__",
"except",
"AttributeError",
":",
"six",
".",
"raise_from",
"(",
"ValueError",
"(",
"'{}.__spec__ is not set'",
".",
"format",
"(",
"name",
")",
")",
",",
"None",
")",
"else",
":",
"if",
"spec",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'{}.__spec__ is None'",
".",
"format",
"(",
"name",
")",
")",
"return",
"spec"
] | python | Return the spec for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__spec__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable spec with the
value of 'path' given to the finders. None is returned if no spec could
be found.
If the name is for submodule (contains a dot), the parent module is
automatically imported.
The name and package arguments work the same as importlib.import_module().
In other words, relative module names (with leading dots) work. | true |
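
The two rows above track `importlib.util.find_spec`; for reference, the standard-library version can be used directly:

```python
import importlib.util

spec = importlib.util.find_spec("json.decoder")  # parent package imported automatically
print(spec.name)    # -> json.decoder
print(spec.origin)  # path to the module's source file

print(importlib.util.find_spec("no_such_module"))  # -> None
```
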
2,579,592 | def set_package(fxn):
"""Set __package__ on the returned module.
This function is deprecated.
"""
@functools.wraps(fxn)
def set_package_wrapper(*args, **kwargs):
warnings.warn('The import system now takes care of this automatically.',
DeprecationWarning, stacklevel=2)
module = fxn(*args, **kwargs)
if getattr(module, '__package__', None) is None:
module.__package__ = module.__name__
if not hasattr(module, '__path__'):
module.__package__ = module.__package__.rpartition('.')[0]
return module
return set_package_wrapper | [
"def",
"set_package",
"(",
"fxn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fxn",
")",
"def",
"set_package_wrapper",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"'The import system now takes care of this automatically.'",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"module",
"=",
"fxn",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"if",
"getattr",
"(",
"module",
",",
"'__package__'",
",",
"None",
")",
"is",
"None",
":",
"module",
".",
"__package__",
"=",
"module",
".",
"__name__",
"if",
"not",
"hasattr",
"(",
"module",
",",
"'__path__'",
")",
":",
"module",
".",
"__package__",
"=",
"module",
".",
"__package__",
".",
"rpartition",
"(",
"'.'",
")",
"[",
"0",
"]",
"return",
"module",
"return",
"set_package_wrapper"
] | python | Set __package__ on the returned module.
This function is deprecated. | true |
2,579,593 | def set_loader(fxn):
"""Set __loader__ on the returned module.
This function is deprecated.
"""
@functools.wraps(fxn)
def set_loader_wrapper(self, *args, **kwargs):
warnings.warn('The import system now takes care of this automatically.',
DeprecationWarning, stacklevel=2)
module = fxn(self, *args, **kwargs)
if getattr(module, '__loader__', None) is None:
module.__loader__ = self
return module
return set_loader_wrapper | [
"def",
"set_loader",
"(",
"fxn",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"fxn",
")",
"def",
"set_loader_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"'The import system now takes care of this automatically.'",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"module",
"=",
"fxn",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"if",
"getattr",
"(",
"module",
",",
"'__loader__'",
",",
"None",
")",
"is",
"None",
":",
"module",
".",
"__loader__",
"=",
"self",
"return",
"module",
"return",
"set_loader_wrapper"
] | python | Set __loader__ on the returned module.
This function is deprecated. | true |
2,579,595 | def module_for_loader(fxn):
"""Decorator to handle selecting the proper module for loaders.
The decorated function is passed the module to use instead of the module
name. The module passed in to the function is either from sys.modules if
it already exists or is a new module. If the module is new, then __name__
    is set to the first argument to the method, __loader__ is set to self, and
    __package__ is set accordingly (if self.is_package() is defined) before it
    is passed to the decorated function (if self.is_package() does not work for
    the module, it will be set post-load).
If an exception is raised and the decorator created the module it is
subsequently removed from sys.modules.
The decorator assumes that the decorated function takes the module name as
the second argument.
"""
warnings.warn('The import system now takes care of this automatically.',
DeprecationWarning, stacklevel=2)
@functools.wraps(fxn)
def module_for_loader_wrapper(self, fullname, *args, **kwargs):
with _module_to_load(fullname) as module:
module.__loader__ = self
try:
is_package = self.is_package(fullname)
except (ImportError, AttributeError):
pass
else:
if is_package:
module.__package__ = fullname
else:
module.__package__ = fullname.rpartition('.')[0]
# If __package__ was not set above, __import__() will do it later.
return fxn(self, module, *args, **kwargs)
return module_for_loader_wrapper | [
"def",
"module_for_loader",
"(",
"fxn",
")",
":",
"warnings",
".",
"warn",
"(",
"'The import system now takes care of this automatically.'",
",",
"DeprecationWarning",
",",
"stacklevel",
"=",
"2",
")",
"@",
"functools",
".",
"wraps",
"(",
"fxn",
")",
"def",
"module_for_loader_wrapper",
"(",
"self",
",",
"fullname",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"with",
"_module_to_load",
"(",
"fullname",
")",
"as",
"module",
":",
"module",
".",
"__loader__",
"=",
"self",
"try",
":",
"is_package",
"=",
"self",
".",
"is_package",
"(",
"fullname",
")",
"except",
"(",
"ImportError",
",",
"AttributeError",
")",
":",
"pass",
"else",
":",
"if",
"is_package",
":",
"module",
".",
"__package__",
"=",
"fullname",
"else",
":",
"module",
".",
"__package__",
"=",
"fullname",
".",
"rpartition",
"(",
"'.'",
")",
"[",
"0",
"]",
"return",
"fxn",
"(",
"self",
",",
"module",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"module_for_loader_wrapper"
] | python | Decorator to handle selecting the proper module for loaders.
The decorated function is passed the module to use instead of the module
name. The module passed in to the function is either from sys.modules if
it already exists or is a new module. If the module is new, then __name__
is set to the first argument to the method, __loader__ is set to self, and
__package__ is set accordingly (if self.is_package() is defined) before it
is passed to the decorated function (if self.is_package() does not work for
the module, it will be set post-load).
If an exception is raised and the decorator created the module it is
subsequently removed from sys.modules.
The decorator assumes that the decorated function takes the module name as
the second argument. | true |
2,581,377 | def example_exc_handler(tries_remaining, exception, delay):
"""Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised.
"""
print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (
exception, tries_remaining, delay) | [
"def",
"example_exc_handler",
"(",
"tries_remaining",
",",
"exception",
",",
"delay",
")",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"\"Caught '%s', %d tries remaining, sleeping for %s seconds\"",
"%",
"(",
"exception",
",",
"tries_remaining",
",",
"delay",
")"
] | python | Example exception handler; prints a warning to stderr.
tries_remaining: The number of tries remaining.
exception: The exception instance which was raised. | true |
2,583,234 | def handle_extensions(extensions=None, ignored=None):
"""
Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times. Note that the .py extension is ignored
here because of the way non-*.py files are handled in ``extract`` messages
(they are copied to file.ext.py files to trick xgettext to parse them as
Python files).
For example: running::
$ verboselib-manage extract -e js,txt -e xhtml -a
would result in an extension list ``['.js', '.txt', '.xhtml']``
.. code-block:: python
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
set(['.html', '.js'])
>>> handle_extensions(['.html, txt,.tpl'])
set(['.html', '.tpl', '.txt'])
Taken `from Django <http://bit.ly/1r7Eokw>`_ and changed a bit.
"""
extensions = extensions or ()
ignored = ignored or ('py', )
ext_list = []
for ext in extensions:
ext_list.extend(ext.replace(' ', '').split(','))
for i, ext in enumerate(ext_list):
if not ext.startswith('.'):
ext_list[i] = '.%s' % ext_list[i]
return set([x for x in ext_list if x.strip('.') not in ignored]) | [
"def",
"handle_extensions",
"(",
"extensions",
"=",
"None",
",",
"ignored",
"=",
"None",
")",
":",
"extensions",
"=",
"extensions",
"or",
"(",
")",
"ignored",
"=",
"ignored",
"or",
"(",
"'py'",
",",
")",
"ext_list",
"=",
"[",
"]",
"for",
"ext",
"in",
"extensions",
":",
"ext_list",
".",
"extend",
"(",
"ext",
".",
"replace",
"(",
"' '",
",",
"''",
")",
".",
"split",
"(",
"','",
")",
")",
"for",
"i",
",",
"ext",
"in",
"enumerate",
"(",
"ext_list",
")",
":",
"if",
"not",
"ext",
".",
"startswith",
"(",
"'.'",
")",
":",
"ext_list",
"[",
"i",
"]",
"=",
"'.%s'",
"%",
"ext_list",
"[",
"i",
"]",
"return",
"set",
"(",
"[",
"x",
"for",
"x",
"in",
"ext_list",
"if",
"x",
".",
"strip",
"(",
"'.'",
")",
"not",
"in",
"ignored",
"]",
")"
] | python | Organizes multiple extensions that are separated with commas or passed by
using --extension/-e multiple times. Note that the .py extension is ignored
here because of the way non-*.py files are handled in ``extract`` messages
(they are copied to file.ext.py files to trick xgettext to parse them as
Python files).
For example: running::
$ verboselib-manage extract -e js,txt -e xhtml -a
would result in an extension list ``['.js', '.txt', '.xhtml']``
.. code-block:: python
>>> handle_extensions(['.html', 'html,js,py,py,py,.py', 'py,.py'])
set(['.html', '.js'])
>>> handle_extensions(['.html, txt,.tpl'])
set(['.html', '.tpl', '.txt'])
Taken `from Django <http://bit.ly/1r7Eokw>`_ and changed a bit. | true |
2,587,109 | def literals(choices, prefix="", suffix=""):
"""Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split()) | [
"def",
"literals",
"(",
"choices",
",",
"prefix",
"=",
"\"\"",
",",
"suffix",
"=",
"\"\"",
")",
":",
"return",
"\"|\"",
".",
"join",
"(",
"prefix",
"+",
"re",
".",
"escape",
"(",
"c",
")",
"+",
"suffix",
"for",
"c",
"in",
"choices",
".",
"split",
"(",
")",
")"
] | python | Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually. | true |
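A small sketch of what the helper builds, assuming the literals() function above is defined in the same session (re imported):

import re

pattern = literals("== != <= >= + *")       # alternation of the escaped tokens, e.g. ==|!=|<=|>=|\+|\*
print(re.findall(pattern, "a+b == c*d"))    # ['+', '==', '*']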
2,587,125 | def normalize_fieldsets(fieldsets):
"""
Make sure the keys in fieldset dictionaries are strings. Returns the
normalized data.
"""
result = []
for name, options in fieldsets:
result.append((name, normalize_dictionary(options)))
return result | [
"def",
"normalize_fieldsets",
"(",
"fieldsets",
")",
":",
"result",
"=",
"[",
"]",
"for",
"name",
",",
"options",
"in",
"fieldsets",
":",
"result",
".",
"append",
"(",
"(",
"name",
",",
"normalize_dictionary",
"(",
"options",
")",
")",
")",
"return",
"result"
] | python | Make sure the keys in fieldset dictionaries are strings. Returns the
normalized data. | true |
2,587,126 | def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
for key, value in data_dict.items():
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict | [
"def",
"normalize_dictionary",
"(",
"data_dict",
")",
":",
"for",
"key",
",",
"value",
"in",
"data_dict",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"key",
",",
"str",
")",
":",
"del",
"data_dict",
"[",
"key",
"]",
"data_dict",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"value",
"return",
"data_dict"
] | python | Converts all the keys in "data_dict" to strings. The keys must be
convertible using str(). | true |
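The loop above mutates the dict it is iterating over (delete plus insert), which Python 3 does not guarantee to handle safely (entries may be skipped or visited twice). A minimal non-mutating sketch, a hypothetical rewrite rather than part of the dataset:

def normalize_dictionary_copy(data_dict):
    """Return a new dict with every key coerced to str; values are unchanged."""
    return {str(key): value for key, value in data_dict.items()}

print(normalize_dictionary_copy({1: "a", "b": 2}))   # {'1': 'a', 'b': 2}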
2,587,158 | def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
try:
import setuptools
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
except ImportError:
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
import pkg_resources
try:
pkg_resources.require("setuptools>="+version)
except pkg_resources.VersionConflict, e:
# XXX could we install in a subprocess here?
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first.\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2) | [
"def",
"use_setuptools",
"(",
"version",
"=",
"DEFAULT_VERSION",
",",
"download_base",
"=",
"DEFAULT_URL",
",",
"to_dir",
"=",
"os",
".",
"curdir",
",",
"download_delay",
"=",
"15",
")",
":",
"try",
":",
"import",
"setuptools",
"if",
"setuptools",
".",
"__version__",
"==",
"'0.0.1'",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"You have an obsolete version of setuptools installed. Please\\n\"",
"\"remove it from your system entirely before rerunning this script.\"",
")",
"sys",
".",
"exit",
"(",
"2",
")",
"except",
"ImportError",
":",
"egg",
"=",
"download_setuptools",
"(",
"version",
",",
"download_base",
",",
"to_dir",
",",
"download_delay",
")",
"sys",
".",
"path",
".",
"insert",
"(",
"0",
",",
"egg",
")",
"import",
"setuptools",
";",
"setuptools",
".",
"bootstrap_install_from",
"=",
"egg",
"import",
"pkg_resources",
"try",
":",
"pkg_resources",
".",
"require",
"(",
"\"setuptools>=\"",
"+",
"version",
")",
"except",
"pkg_resources",
".",
"VersionConflict",
",",
"e",
":",
"print",
">>",
"sys",
".",
"stderr",
",",
"(",
"\"The required version of setuptools (>=%s) is not available, and\\n\"",
"\"can't be installed while this script is running. Please install\\n\"",
"\" a more recent version first.\\n\\n(Currently using %r)\"",
")",
"%",
"(",
"version",
",",
"e",
".",
"args",
"[",
"0",
"]",
")",
"sys",
".",
"exit",
"(",
"2",
")"
] | python | Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script. | true |
2,587,184 | def direct_to_user_template(request, username, template_name,
extra_context=None):
"""
Simple wrapper for Django's :func:`direct_to_template` view.
This view is used when you want to show a template to a specific user. A
wrapper for :func:`direct_to_template` where the template also has access
    to the user that is found with ``username``. For example, it is used after
    signup, activation and confirmation of a new e-mail.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``accounts/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed.
"""
user = get_object_or_404(get_user_model(), username__iexact=username)
if not extra_context:
extra_context = dict()
extra_context['viewed_user'] = user
extra_context['profile'] = user.get_profile()
return ExtraContextTemplateView.as_view(template_name=template_name,
extra_context=extra_context)(request) | [
"def",
"direct_to_user_template",
"(",
"request",
",",
"username",
",",
"template_name",
",",
"extra_context",
"=",
"None",
")",
":",
"user",
"=",
"get_object_or_404",
"(",
"get_user_model",
"(",
")",
",",
"username__iexact",
"=",
"username",
")",
"if",
"not",
"extra_context",
":",
"extra_context",
"=",
"dict",
"(",
")",
"extra_context",
"[",
"'viewed_user'",
"]",
"=",
"user",
"extra_context",
"[",
"'profile'",
"]",
"=",
"user",
".",
"get_profile",
"(",
")",
"return",
"ExtraContextTemplateView",
".",
"as_view",
"(",
"template_name",
"=",
"template_name",
",",
"extra_context",
"=",
"extra_context",
")",
"(",
"request",
")"
] | python | Simple wrapper for Django's :func:`direct_to_template` view.
This view is used when you want to show a template to a specific user. A
wrapper for :func:`direct_to_template` where the template also has access
to the user that is found with ``username``. For example, it is used after
signup, activation and confirmation of a new e-mail.
:param username:
String defining the username of the user that made the action.
:param template_name:
String defining the name of the template to use. Defaults to
``accounts/signup_complete.html``.
**Keyword arguments**
``extra_context``
A dictionary containing extra variables that should be passed to the
rendered template. The ``account`` key is always the ``User``
that completed the action.
**Extra context**
``viewed_user``
The currently :class:`User` that is viewed. | true |
2,589,290 | def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64) | [
"def",
"as_float_array",
"(",
"X",
",",
"copy",
"=",
"True",
",",
"force_all_finite",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"X",
",",
"np",
".",
"matrix",
")",
"or",
"(",
"not",
"isinstance",
"(",
"X",
",",
"np",
".",
"ndarray",
")",
"and",
"not",
"sp",
".",
"issparse",
"(",
"X",
")",
")",
":",
"return",
"check_array",
"(",
"X",
",",
"[",
"'csr'",
",",
"'csc'",
",",
"'coo'",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
",",
"copy",
"=",
"copy",
",",
"force_all_finite",
"=",
"force_all_finite",
",",
"ensure_2d",
"=",
"False",
")",
"elif",
"sp",
".",
"issparse",
"(",
"X",
")",
"and",
"X",
".",
"dtype",
"in",
"[",
"np",
".",
"float32",
",",
"np",
".",
"float64",
"]",
":",
"return",
"X",
".",
"copy",
"(",
")",
"if",
"copy",
"else",
"X",
"elif",
"X",
".",
"dtype",
"in",
"[",
"np",
".",
"float32",
",",
"np",
".",
"float64",
"]",
":",
"return",
"X",
".",
"copy",
"(",
"'F'",
"if",
"X",
".",
"flags",
"[",
"'F_CONTIGUOUS'",
"]",
"else",
"'C'",
")",
"if",
"copy",
"else",
"X",
"else",
":",
"return",
"X",
".",
"astype",
"(",
"np",
".",
"float32",
"if",
"X",
".",
"dtype",
"==",
"np",
".",
"int32",
"else",
"np",
".",
"float64",
")"
] | python | Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float | true |
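A small usage sketch, assuming the as_float_array() snippet above is defined in the current session together with the module-level names it relies on (numpy as np, scipy.sparse as sp, and check_array for the non-array branch):

import numpy as np
import scipy.sparse as sp   # the function above expects np and sp at module level

X = np.array([[1, 2], [3, 4]], dtype=np.int32)
print(as_float_array(X).dtype)                    # float32: int32 input is widened, not doubled
print(as_float_array(X.astype(np.int64)).dtype)   # float64

Xf = X.astype(np.float64)
print(as_float_array(Xf, copy=False) is Xf)       # True: already float, returned as-is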
2,589,293 | def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result | [
"def",
"indexable",
"(",
"*",
"iterables",
")",
":",
"result",
"=",
"[",
"]",
"for",
"X",
"in",
"iterables",
":",
"if",
"sp",
".",
"issparse",
"(",
"X",
")",
":",
"result",
".",
"append",
"(",
"X",
".",
"tocsr",
"(",
")",
")",
"elif",
"hasattr",
"(",
"X",
",",
"\"__getitem__\"",
")",
"or",
"hasattr",
"(",
"X",
",",
"\"iloc\"",
")",
":",
"result",
".",
"append",
"(",
"X",
")",
"elif",
"X",
"is",
"None",
":",
"result",
".",
"append",
"(",
"X",
")",
"else",
":",
"result",
".",
"append",
"(",
"np",
".",
"array",
"(",
"X",
")",
")",
"check_consistent_length",
"(",
"*",
"result",
")",
"return",
"result"
] | python | Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability. | true |
2,589,296 | def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape)) | [
"def",
"column_or_1d",
"(",
"y",
",",
"warn",
"=",
"False",
")",
":",
"shape",
"=",
"np",
".",
"shape",
"(",
"y",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"1",
":",
"return",
"np",
".",
"ravel",
"(",
"y",
")",
"if",
"len",
"(",
"shape",
")",
"==",
"2",
"and",
"shape",
"[",
"1",
"]",
"==",
"1",
":",
"if",
"warn",
":",
"warnings",
".",
"warn",
"(",
"\"A column-vector y was passed when a 1d array was\"",
"\" expected. Please change the shape of y to \"",
"\"(n_samples, ), for example using ravel().\"",
",",
"DataConversionWarning",
",",
"stacklevel",
"=",
"2",
")",
"return",
"np",
".",
"ravel",
"(",
"y",
")",
"raise",
"ValueError",
"(",
"\"bad input shape {0}\"",
".",
"format",
"(",
"shape",
")",
")"
] | python | Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
Returns
-------
y : array | true |
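A quick sketch of the accepted shapes, assuming column_or_1d() above is in scope with numpy imported as np:

import numpy as np

print(column_or_1d(np.array([[1], [2], [3]])))   # array([1, 2, 3]) -- an (n, 1) column is ravelled
print(column_or_1d([4, 5, 6]))                   # 1d input comes back as a 1d array
# column_or_1d(np.ones((2, 2))) raises ValueError("bad input shape (2, 2)")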
2,594,897 | def parsemsg(s): # stolen from twisted.words
"""Breaks a message from an IRC server into its prefix, command, and arguments.
"""
prefix = ''
trailing = []
if not s:
raise Exception("Empty line.")
if s[0] == ':':
prefix, s = s[1:].split(' ', 1)
if s.find(' :') != -1:
s, trailing = s.split(' :', 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
command = args.pop(0)
return prefix, command, args | [
"def",
"parsemsg",
"(",
"s",
")",
":",
"prefix",
"=",
"''",
"trailing",
"=",
"[",
"]",
"if",
"not",
"s",
":",
"raise",
"Exception",
"(",
"\"Empty line.\"",
")",
"if",
"s",
"[",
"0",
"]",
"==",
"':'",
":",
"prefix",
",",
"s",
"=",
"s",
"[",
"1",
":",
"]",
".",
"split",
"(",
"' '",
",",
"1",
")",
"if",
"s",
".",
"find",
"(",
"' :'",
")",
"!=",
"-",
"1",
":",
"s",
",",
"trailing",
"=",
"s",
".",
"split",
"(",
"' :'",
",",
"1",
")",
"args",
"=",
"s",
".",
"split",
"(",
")",
"args",
".",
"append",
"(",
"trailing",
")",
"else",
":",
"args",
"=",
"s",
".",
"split",
"(",
")",
"command",
"=",
"args",
".",
"pop",
"(",
"0",
")",
"return",
"prefix",
",",
"command",
",",
"args"
] | python | Breaks a message from an IRC server into its prefix, command, and arguments. | true |
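A worked example, assuming parsemsg() above is in scope:

prefix, command, args = parsemsg(":nick!user@host PRIVMSG #channel :hello there")
print(prefix)    # nick!user@host
print(command)   # PRIVMSG
print(args)      # ['#channel', 'hello there']  -- the trailing part keeps its spaces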
2,598,347 | def requirements():
"""Build the requirements list for this project"""
requirements_list = []
with open('requirements.txt') as requirements:
for install in requirements:
requirements_list.append(install.strip())
return requirements_list | [
"def",
"requirements",
"(",
")",
":",
"requirements_list",
"=",
"[",
"]",
"with",
"open",
"(",
"'requirements.txt'",
")",
"as",
"requirements",
":",
"for",
"install",
"in",
"requirements",
":",
"requirements_list",
".",
"append",
"(",
"install",
".",
"strip",
"(",
")",
")",
"return",
"requirements_list"
] | python | Build the requirements list for this project | true |
2,600,893 | def normalize_rgb_colors_to_hex(css):
"""Convert `rgb(51,102,153)` to `#336699`."""
regex = re.compile(r"rgb\s*\(\s*([0-9,\s]+)\s*\)")
match = regex.search(css)
while match:
colors = [s.strip() for s in match.group(1).split(",")]
hexcolor = '#%.2x%.2x%.2x' % tuple(map(int, colors))
css = css.replace(match.group(), hexcolor)
match = regex.search(css)
return css | [
"def",
"normalize_rgb_colors_to_hex",
"(",
"css",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"r\"rgb\\s*\\(\\s*([0-9,\\s]+)\\s*\\)\"",
")",
"match",
"=",
"regex",
".",
"search",
"(",
"css",
")",
"while",
"match",
":",
"colors",
"=",
"[",
"s",
".",
"strip",
"(",
")",
"for",
"s",
"in",
"match",
".",
"group",
"(",
"1",
")",
".",
"split",
"(",
"\",\"",
")",
"]",
"hexcolor",
"=",
"'#%.2x%.2x%.2x'",
"%",
"tuple",
"(",
"map",
"(",
"int",
",",
"colors",
")",
")",
"css",
"=",
"css",
".",
"replace",
"(",
"match",
".",
"group",
"(",
")",
",",
"hexcolor",
")",
"match",
"=",
"regex",
".",
"search",
"(",
"css",
")",
"return",
"css"
] | python | Convert `rgb(51,102,153)` to `#336699`. | true |
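A usage sketch, assuming normalize_rgb_colors_to_hex() above is in scope (re imported):

css = "a { color: rgb(51,102,153); background: rgb(255, 255, 255); }"
print(normalize_rgb_colors_to_hex(css))
# a { color: #336699; background: #ffffff; }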
2,600,894 | def condense_multidimensional_zeros(css):
"""Replace `:0 0 0 0;`, `:0 0 0;` etc. with `:0;`."""
css = css.replace(":0 0 0 0;", ":0;")
css = css.replace(":0 0 0;", ":0;")
css = css.replace(":0 0;", ":0;")
# Revert `background-position:0;` to the valid `background-position:0 0;`.
css = css.replace("background-position:0;", "background-position:0 0;")
return css | [
"def",
"condense_multidimensional_zeros",
"(",
"css",
")",
":",
"css",
"=",
"css",
".",
"replace",
"(",
"\":0 0 0 0;\"",
",",
"\":0;\"",
")",
"css",
"=",
"css",
".",
"replace",
"(",
"\":0 0 0;\"",
",",
"\":0;\"",
")",
"css",
"=",
"css",
".",
"replace",
"(",
"\":0 0;\"",
",",
"\":0;\"",
")",
"css",
"=",
"css",
".",
"replace",
"(",
"\"background-position:0;\"",
",",
"\"background-position:0 0;\"",
")",
"return",
"css"
] | python | Replace `:0 0 0 0;`, `:0 0 0;` etc. with `:0;`. | true |
2,600,895 | def condense_hex_colors(css):
"""Shorten colors from #AABBCC to #ABC where possible."""
regex = re.compile(r"([^\"'=\s])(\s*)#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])")
match = regex.search(css)
while match:
first = match.group(3) + match.group(5) + match.group(7)
second = match.group(4) + match.group(6) + match.group(8)
if first.lower() == second.lower():
css = css.replace(match.group(), match.group(1) + match.group(2) + '#' + first)
match = regex.search(css, match.end() - 3)
else:
match = regex.search(css, match.end())
return css | [
"def",
"condense_hex_colors",
"(",
"css",
")",
":",
"regex",
"=",
"re",
".",
"compile",
"(",
"r\"([^\\\"'=\\s])(\\s*)#([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])([0-9a-fA-F])\"",
")",
"match",
"=",
"regex",
".",
"search",
"(",
"css",
")",
"while",
"match",
":",
"first",
"=",
"match",
".",
"group",
"(",
"3",
")",
"+",
"match",
".",
"group",
"(",
"5",
")",
"+",
"match",
".",
"group",
"(",
"7",
")",
"second",
"=",
"match",
".",
"group",
"(",
"4",
")",
"+",
"match",
".",
"group",
"(",
"6",
")",
"+",
"match",
".",
"group",
"(",
"8",
")",
"if",
"first",
".",
"lower",
"(",
")",
"==",
"second",
".",
"lower",
"(",
")",
":",
"css",
"=",
"css",
".",
"replace",
"(",
"match",
".",
"group",
"(",
")",
",",
"match",
".",
"group",
"(",
"1",
")",
"+",
"match",
".",
"group",
"(",
"2",
")",
"+",
"'#'",
"+",
"first",
")",
"match",
"=",
"regex",
".",
"search",
"(",
"css",
",",
"match",
".",
"end",
"(",
")",
"-",
"3",
")",
"else",
":",
"match",
"=",
"regex",
".",
"search",
"(",
"css",
",",
"match",
".",
"end",
"(",
")",
")",
"return",
"css"
] | python | Shorten colors from #AABBCC to #ABC where possible. | true |
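A usage sketch, assuming condense_hex_colors() above is in scope (re imported); only colors whose byte pairs repeat are shortened:

print(condense_hex_colors("p { color: #AABBCC; background: #123456; }"))
# p { color: #ABC; background: #123456; }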
2,600,896 | def wrap_css_lines(css, line_length):
"""Wrap the lines of the given CSS to an approximate length."""
lines = []
line_start = 0
for i, char in enumerate(css):
# It's safe to break after `}` characters.
if char == '}' and (i - line_start >= line_length):
lines.append(css[line_start:i + 1])
line_start = i + 1
if line_start < len(css):
lines.append(css[line_start:])
return '\n'.join(lines) | [
"def",
"wrap_css_lines",
"(",
"css",
",",
"line_length",
")",
":",
"lines",
"=",
"[",
"]",
"line_start",
"=",
"0",
"for",
"i",
",",
"char",
"in",
"enumerate",
"(",
"css",
")",
":",
"if",
"char",
"==",
"'}'",
"and",
"(",
"i",
"-",
"line_start",
">=",
"line_length",
")",
":",
"lines",
".",
"append",
"(",
"css",
"[",
"line_start",
":",
"i",
"+",
"1",
"]",
")",
"line_start",
"=",
"i",
"+",
"1",
"if",
"line_start",
"<",
"len",
"(",
"css",
")",
":",
"lines",
".",
"append",
"(",
"css",
"[",
"line_start",
":",
"]",
")",
"return",
"'\\n'",
".",
"join",
"(",
"lines",
")"
] | python | Wrap the lines of the given CSS to an approximate length. | true |
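A small example, assuming wrap_css_lines() above is in scope; breaks are only inserted after '}' once a line has reached the requested length:

css = "a{color:red}b{color:blue}i{color:green}"
print(wrap_css_lines(css, 10))
# a{color:red}
# b{color:blue}
# i{color:green}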
2,604,802 | def getfield(f):
"""convert values from cgi.Field objects to plain values."""
if isinstance(f, list):
return [getfield(x) for x in f]
else:
return f.value | [
"def",
"getfield",
"(",
"f",
")",
":",
"if",
"isinstance",
"(",
"f",
",",
"list",
")",
":",
"return",
"[",
"getfield",
"(",
"x",
")",
"for",
"x",
"in",
"f",
"]",
"else",
":",
"return",
"f",
".",
"value"
] | python | convert values from cgi.Field objects to plain values. | true |
2,607,040 | def admin_url(model, url, object_id=None):
"""
Returns the URL for the given model and admin url name.
"""
opts = model._meta
url = "admin:%s_%s_%s" % (opts.app_label, opts.object_name.lower(), url)
args = ()
if object_id is not None:
args = (object_id,)
return reverse(url, args=args) | [
"def",
"admin_url",
"(",
"model",
",",
"url",
",",
"object_id",
"=",
"None",
")",
":",
"opts",
"=",
"model",
".",
"_meta",
"url",
"=",
"\"admin:%s_%s_%s\"",
"%",
"(",
"opts",
".",
"app_label",
",",
"opts",
".",
"object_name",
".",
"lower",
"(",
")",
",",
"url",
")",
"args",
"=",
"(",
")",
"if",
"object_id",
"is",
"not",
"None",
":",
"args",
"=",
"(",
"object_id",
",",
")",
"return",
"reverse",
"(",
"url",
",",
"args",
"=",
"args",
")"
] | python | Returns the URL for the given model and admin url name. | true |
2,614,857 | def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
('%s%s%s' % (random.getstate(), time.time(), settings.SECRET_KEY)).encode()
).digest()
)
return ''.join(random.choice(allowed_chars) for i in range(length)) | [
"def",
"get_random_string",
"(",
"length",
"=",
"12",
",",
"allowed_chars",
"=",
"'abcdefghijklmnopqrstuvwxyz'",
"'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'",
")",
":",
"if",
"not",
"using_sysrandom",
":",
"random",
".",
"seed",
"(",
"hashlib",
".",
"sha256",
"(",
"(",
"'%s%s%s'",
"%",
"(",
"random",
".",
"getstate",
"(",
")",
",",
"time",
".",
"time",
"(",
")",
",",
"settings",
".",
"SECRET_KEY",
")",
")",
".",
"encode",
"(",
")",
")",
".",
"digest",
"(",
")",
")",
"return",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"allowed_chars",
")",
"for",
"i",
"in",
"range",
"(",
"length",
")",
")"
] | python | Return a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits | true |
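The Django helper above depends on settings.SECRET_KEY and the module-level using_sysrandom flag; a self-contained modern sketch of the same idea uses the stdlib secrets module (an illustrative equivalent, not the Django API):

import math
import secrets
import string

alphabet = string.ascii_letters + string.digits        # 62 characters, as in the default above
token = "".join(secrets.choice(alphabet) for _ in range(12))
print(token)                                           # e.g. 'kT3mQx9ZpB2c'
print(math.log2(len(alphabet) ** 12))                  # ~71.45 bits, matching the docstring's estimate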
2,614,860 | def mask_hash(hash, show=6, char="*"):
"""
    Return the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons.
"""
masked = hash[:show]
masked += char * len(hash[show:])
return masked | [
"def",
"mask_hash",
"(",
"hash",
",",
"show",
"=",
"6",
",",
"char",
"=",
"\"*\"",
")",
":",
"masked",
"=",
"hash",
"[",
":",
"show",
"]",
"masked",
"+=",
"char",
"*",
"len",
"(",
"hash",
"[",
"show",
":",
"]",
")",
"return",
"masked"
] | python | Return the given hash, with only the first ``show`` characters shown. The
rest are masked with ``char`` for security reasons. | true |
2,614,908 | def tokenize_asdl(buf):
"""Tokenize the given buffer. Yield Token objects."""
for lineno, line in enumerate(buf.splitlines(), 1):
for m in re.finditer(r'\s*(\w+|--.*|.)', line.strip()):
c = m.group(1)
if c[0].isalpha():
# Some kind of identifier
if c[0].isupper():
yield Token(TokenKind.ConstructorId, c, lineno)
else:
yield Token(TokenKind.TypeId, c, lineno)
elif c[:2] == '--':
# Comment
break
else:
# Operators
try:
op_kind = TokenKind.operator_table[c]
except KeyError:
raise ASDLSyntaxError('Invalid operator %s' % c, lineno)
yield Token(op_kind, c, lineno) | [
"def",
"tokenize_asdl",
"(",
"buf",
")",
":",
"for",
"lineno",
",",
"line",
"in",
"enumerate",
"(",
"buf",
".",
"splitlines",
"(",
")",
",",
"1",
")",
":",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"r'\\s*(\\w+|--.*|.)'",
",",
"line",
".",
"strip",
"(",
")",
")",
":",
"c",
"=",
"m",
".",
"group",
"(",
"1",
")",
"if",
"c",
"[",
"0",
"]",
".",
"isalpha",
"(",
")",
":",
"if",
"c",
"[",
"0",
"]",
".",
"isupper",
"(",
")",
":",
"yield",
"Token",
"(",
"TokenKind",
".",
"ConstructorId",
",",
"c",
",",
"lineno",
")",
"else",
":",
"yield",
"Token",
"(",
"TokenKind",
".",
"TypeId",
",",
"c",
",",
"lineno",
")",
"elif",
"c",
"[",
":",
"2",
"]",
"==",
"'--'",
":",
"break",
"else",
":",
"try",
":",
"op_kind",
"=",
"TokenKind",
".",
"operator_table",
"[",
"c",
"]",
"except",
"KeyError",
":",
"raise",
"ASDLSyntaxError",
"(",
"'Invalid operator %s'",
"%",
"c",
",",
"lineno",
")",
"yield",
"Token",
"(",
"op_kind",
",",
"c",
",",
"lineno",
")"
] | python | Tokenize the given buffer. Yield Token objects. | true |
2,615,191 | def find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
    'path' is a string listing directories separated by 'os.pathsep'; it defaults
    to os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable | [
"def",
"find_executable",
"(",
"executable",
",",
"path",
"=",
"None",
")",
":",
"if",
"path",
"is",
"None",
":",
"path",
"=",
"os",
".",
"environ",
"[",
"'PATH'",
"]",
"paths",
"=",
"path",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"base",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"executable",
")",
"if",
"(",
"sys",
".",
"platform",
"==",
"'win32'",
"or",
"os",
".",
"name",
"==",
"'os2'",
")",
"and",
"(",
"ext",
"!=",
"'.exe'",
")",
":",
"executable",
"=",
"executable",
"+",
"'.exe'",
"if",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"executable",
")",
":",
"for",
"p",
"in",
"paths",
":",
"f",
"=",
"os",
".",
"path",
".",
"join",
"(",
"p",
",",
"executable",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"f",
")",
":",
"return",
"f",
"return",
"None",
"else",
":",
"return",
"executable"
] | python | Tries to find 'executable' in the directories listed in 'path'.
'path' is a string listing directories separated by 'os.pathsep'; it defaults
to os.environ['PATH']. Returns the complete filename or None if not found. | true |
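A usage sketch, assuming find_executable() above is in scope (its module imports os and sys); the printed paths depend on the local PATH:

print(find_executable("python3"))                   # e.g. '/usr/bin/python3', or None if absent
print(find_executable("surely-not-a-real-tool"))    # None
print(find_executable("ls", path="/usr/bin:/bin"))  # explicit search path (POSIX-style separator)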
2,615,849 | def keep_kwargs_partial(func, *args, **keywords):
"""Like functools.partial but instead of using the new kwargs, keeps the old ones."""
def newfunc(*fargs, **fkeywords):
newkeywords = fkeywords.copy()
newkeywords.update(keywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc | [
"def",
"keep_kwargs_partial",
"(",
"func",
",",
"*",
"args",
",",
"**",
"keywords",
")",
":",
"def",
"newfunc",
"(",
"*",
"fargs",
",",
"**",
"fkeywords",
")",
":",
"newkeywords",
"=",
"fkeywords",
".",
"copy",
"(",
")",
"newkeywords",
".",
"update",
"(",
"keywords",
")",
"return",
"func",
"(",
"*",
"(",
"args",
"+",
"fargs",
")",
",",
"**",
"newkeywords",
")",
"newfunc",
".",
"func",
"=",
"func",
"newfunc",
".",
"args",
"=",
"args",
"newfunc",
".",
"keywords",
"=",
"keywords",
"return",
"newfunc"
] | python | Like functools.partial but instead of using the new kwargs, keeps the old ones. | true |
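A contrast with functools.partial, assuming keep_kwargs_partial() above is in scope:

import functools

def greet(greeting, name):
    return "%s, %s!" % (greeting, name)

wrapped = keep_kwargs_partial(greet, "Hello", name="frozen")
print(wrapped())                  # Hello, frozen!
print(wrapped(name="caller"))     # Hello, frozen!   (the frozen kwargs win)
print(functools.partial(greet, "Hello", name="frozen")(name="caller"))
                                  # Hello, caller!   (functools.partial lets the new kwargs win)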
2,623,014 | def get_access_flags_string(value):
"""
Transform an access flags to the corresponding string
:param value: the value of the access flags
:type value: int
:rtype: string
"""
buff = ""
for i in ACCESS_FLAGS:
if (i[0] & value) == i[0]:
buff += i[1] + " "
if buff != "":
return buff[:-1]
return buff | [
"def",
"get_access_flags_string",
"(",
"value",
")",
":",
"buff",
"=",
"\"\"",
"for",
"i",
"in",
"ACCESS_FLAGS",
":",
"if",
"(",
"i",
"[",
"0",
"]",
"&",
"value",
")",
"==",
"i",
"[",
"0",
"]",
":",
"buff",
"+=",
"i",
"[",
"1",
"]",
"+",
"\" \"",
"if",
"buff",
"!=",
"\"\"",
":",
"return",
"buff",
"[",
":",
"-",
"1",
"]",
"return",
"buff"
] | python | Transform an access flags to the corresponding string
:param value: the value of the access flags
:type value: int
:rtype: string | true |
2,627,190 | def long2str(l):
"""Convert an integer to a string."""
if type(l) not in (types.IntType, types.LongType):
raise ValueError('the input must be an integer')
if l < 0:
raise ValueError('the input must be greater than 0')
s = ''
while l:
s = s + chr(l & 255)
l >>= 8
return s | [
"def",
"long2str",
"(",
"l",
")",
":",
"if",
"type",
"(",
"l",
")",
"not",
"in",
"(",
"types",
".",
"IntType",
",",
"types",
".",
"LongType",
")",
":",
"raise",
"ValueError",
"(",
"'the input must be an integer'",
")",
"if",
"l",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'the input must be greater than 0'",
")",
"s",
"=",
"''",
"while",
"l",
":",
"s",
"=",
"s",
"+",
"chr",
"(",
"l",
"&",
"255",
")",
"l",
">>=",
"8",
"return",
"s"
] | python | Convert an integer to a string. | true |
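The snippet above is Python 2 only (types.IntType, byte-string concatenation); a hedged Python 3 sketch with the same little-endian, minimal-length behaviour:

def long2str_py3(value):
    """Little-endian bytes of a non-negative int (b'' for 0, matching the original)."""
    if not isinstance(value, int):
        raise ValueError('the input must be an integer')
    if value < 0:
        raise ValueError('the input must be greater than 0')
    return value.to_bytes((value.bit_length() + 7) // 8, 'little')

print(long2str_py3(0x1234))   # b'4\x12'  -> bytes 0x34, 0x12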
2,627,193 | def is_android(filename):
"""Return the type of the file
@param filename : the filename
@rtype : "APK", "DEX", None
"""
if not filename:
return None
with open(filename, "rb") as fd:
f_bytes = fd.read()
return is_android_raw(f_bytes)
return None | [
"def",
"is_android",
"(",
"filename",
")",
":",
"if",
"not",
"filename",
":",
"return",
"None",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"fd",
":",
"f_bytes",
"=",
"fd",
".",
"read",
"(",
")",
"return",
"is_android_raw",
"(",
"f_bytes",
")",
"return",
"None"
] | python | Return the type of the file
@param filename : the filename
@rtype : "APK", "DEX", None | true |
2,628,052 | def _EndRecData64(fpin, offset, endrec):
"""
Read the ZIP64 end-of-archive records and use that to update endrec
"""
try:
fpin.seek(offset - sizeEndCentDir64Locator, 2)
except IOError:
# If the seek fails, the file is not large enough to contain a ZIP64
# end-of-archive record, so just return the end record we were given.
return endrec
data = fpin.read(sizeEndCentDir64Locator)
sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
if sig != stringEndArchive64Locator:
return endrec
if diskno != 0 or disks != 1:
raise BadZipfile("zipfiles that span multiple disks are not supported")
# Assume no 'zip64 extensible data'
fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
data = fpin.read(sizeEndCentDir64)
sig, sz, create_version, read_version, disk_num, disk_dir, \
dircount, dircount2, dirsize, diroffset = \
struct.unpack(structEndArchive64, data)
if sig != stringEndArchive64:
return endrec
# Update the original endrec using data from the ZIP64 record
endrec[_ECD_SIGNATURE] = sig
endrec[_ECD_DISK_NUMBER] = disk_num
endrec[_ECD_DISK_START] = disk_dir
endrec[_ECD_ENTRIES_THIS_DISK] = dircount
endrec[_ECD_ENTRIES_TOTAL] = dircount2
endrec[_ECD_SIZE] = dirsize
endrec[_ECD_OFFSET] = diroffset
return endrec | [
"def",
"_EndRecData64",
"(",
"fpin",
",",
"offset",
",",
"endrec",
")",
":",
"try",
":",
"fpin",
".",
"seek",
"(",
"offset",
"-",
"sizeEndCentDir64Locator",
",",
"2",
")",
"except",
"IOError",
":",
"return",
"endrec",
"data",
"=",
"fpin",
".",
"read",
"(",
"sizeEndCentDir64Locator",
")",
"sig",
",",
"diskno",
",",
"reloff",
",",
"disks",
"=",
"struct",
".",
"unpack",
"(",
"structEndArchive64Locator",
",",
"data",
")",
"if",
"sig",
"!=",
"stringEndArchive64Locator",
":",
"return",
"endrec",
"if",
"diskno",
"!=",
"0",
"or",
"disks",
"!=",
"1",
":",
"raise",
"BadZipfile",
"(",
"\"zipfiles that span multiple disks are not supported\"",
")",
"fpin",
".",
"seek",
"(",
"offset",
"-",
"sizeEndCentDir64Locator",
"-",
"sizeEndCentDir64",
",",
"2",
")",
"data",
"=",
"fpin",
".",
"read",
"(",
"sizeEndCentDir64",
")",
"sig",
",",
"sz",
",",
"create_version",
",",
"read_version",
",",
"disk_num",
",",
"disk_dir",
",",
"dircount",
",",
"dircount2",
",",
"dirsize",
",",
"diroffset",
"=",
"struct",
".",
"unpack",
"(",
"structEndArchive64",
",",
"data",
")",
"if",
"sig",
"!=",
"stringEndArchive64",
":",
"return",
"endrec",
"endrec",
"[",
"_ECD_SIGNATURE",
"]",
"=",
"sig",
"endrec",
"[",
"_ECD_DISK_NUMBER",
"]",
"=",
"disk_num",
"endrec",
"[",
"_ECD_DISK_START",
"]",
"=",
"disk_dir",
"endrec",
"[",
"_ECD_ENTRIES_THIS_DISK",
"]",
"=",
"dircount",
"endrec",
"[",
"_ECD_ENTRIES_TOTAL",
"]",
"=",
"dircount2",
"endrec",
"[",
"_ECD_SIZE",
"]",
"=",
"dirsize",
"endrec",
"[",
"_ECD_OFFSET",
"]",
"=",
"diroffset",
"return",
"endrec"
] | python | Read the ZIP64 end-of-archive records and use that to update endrec | true |
2,628,308 | def extraneous_whitespace(logical_line):
"""
Avoid extraneous whitespace in the following situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon.
"""
line = logical_line
for char in '([{':
found = line.find(char + ' ')
if found > -1:
return found + 1, "E201 whitespace after '%s'" % char
for char in '}])':
found = line.find(' ' + char)
if found > -1 and line[found - 1] != ',':
return found, "E202 whitespace before '%s'" % char
for char in ',;:':
found = line.find(' ' + char)
if found > -1:
return found, "E203 whitespace before '%s'" % char | [
"def",
"extraneous_whitespace",
"(",
"logical_line",
")",
":",
"line",
"=",
"logical_line",
"for",
"char",
"in",
"'([{'",
":",
"found",
"=",
"line",
".",
"find",
"(",
"char",
"+",
"' '",
")",
"if",
"found",
">",
"-",
"1",
":",
"return",
"found",
"+",
"1",
",",
"\"E201 whitespace after '%s'\"",
"%",
"char",
"for",
"char",
"in",
"'}])'",
":",
"found",
"=",
"line",
".",
"find",
"(",
"' '",
"+",
"char",
")",
"if",
"found",
">",
"-",
"1",
"and",
"line",
"[",
"found",
"-",
"1",
"]",
"!=",
"','",
":",
"return",
"found",
",",
"\"E202 whitespace before '%s'\"",
"%",
"char",
"for",
"char",
"in",
"',;:'",
":",
"found",
"=",
"line",
".",
"find",
"(",
"' '",
"+",
"char",
")",
"if",
"found",
">",
"-",
"1",
":",
"return",
"found",
",",
"\"E203 whitespace before '%s'\"",
"%",
"char"
] | python | Avoid extraneous whitespace in the following situations:
- Immediately inside parentheses, brackets or braces.
- Immediately before a comma, semicolon, or colon. | true |
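A few worked checks, assuming extraneous_whitespace() above is in scope; a clean line returns None:

print(extraneous_whitespace("spam( ham[1], {eggs: 2})"))   # (5, "E201 whitespace after '('")
print(extraneous_whitespace("if x == 4 : pass"))           # (9, "E203 whitespace before ':'")
print(extraneous_whitespace("spam(1)"))                    # None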
2,628,309 | def indentation(logical_line, previous_logical, indent_char,
indent_level, previous_indent_level):
"""
Use 4 spaces per indentation level.
For really old code that you don't want to mess up, you can continue to
use 8-space tabs.
"""
if indent_char == ' ' and indent_level % 4:
return 0, "E111 indentation is not a multiple of four"
indent_expect = previous_logical.endswith(':')
if indent_expect and indent_level <= previous_indent_level:
return 0, "E112 expected an indented block"
if indent_level > previous_indent_level and not indent_expect:
return 0, "E113 unexpected indentation" | [
"def",
"indentation",
"(",
"logical_line",
",",
"previous_logical",
",",
"indent_char",
",",
"indent_level",
",",
"previous_indent_level",
")",
":",
"if",
"indent_char",
"==",
"' '",
"and",
"indent_level",
"%",
"4",
":",
"return",
"0",
",",
"\"E111 indentation is not a multiple of four\"",
"indent_expect",
"=",
"previous_logical",
".",
"endswith",
"(",
"':'",
")",
"if",
"indent_expect",
"and",
"indent_level",
"<=",
"previous_indent_level",
":",
"return",
"0",
",",
"\"E112 expected an indented block\"",
"if",
"indent_level",
">",
"previous_indent_level",
"and",
"not",
"indent_expect",
":",
"return",
"0",
",",
"\"E113 unexpected indentation\""
] | python | Use 4 spaces per indentation level.
For really old code that you don't want to mess up, you can continue to
use 8-space tabs. | true |
2,628,314 | def find_checks(argument_name):
"""
Find all globally visible functions where the first argument name
starts with argument_name.
"""
checks = []
function_type = type(find_checks)
for name, function in globals().iteritems():
if type(function) is function_type:
args = inspect.getargspec(function)[0]
if len(args) >= 1 and args[0].startswith(argument_name):
checks.append((name, function, args))
checks.sort()
return checks | [
"def",
"find_checks",
"(",
"argument_name",
")",
":",
"checks",
"=",
"[",
"]",
"function_type",
"=",
"type",
"(",
"find_checks",
")",
"for",
"name",
",",
"function",
"in",
"globals",
"(",
")",
".",
"iteritems",
"(",
")",
":",
"if",
"type",
"(",
"function",
")",
"is",
"function_type",
":",
"args",
"=",
"inspect",
".",
"getargspec",
"(",
"function",
")",
"[",
"0",
"]",
"if",
"len",
"(",
"args",
")",
">=",
"1",
"and",
"args",
"[",
"0",
"]",
".",
"startswith",
"(",
"argument_name",
")",
":",
"checks",
".",
"append",
"(",
"(",
"name",
",",
"function",
",",
"args",
")",
")",
"checks",
".",
"sort",
"(",
")",
"return",
"checks"
] | python | Find all globally visible functions where the first argument name
starts with argument_name. | true |
2,628,315 | def mute_string(text):
"""
Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'"
"""
start = 1
end = len(text) - 1
# String modifiers (e.g. u or r)
if text.endswith('"'):
start += text.index('"')
elif text.endswith("'"):
start += text.index("'")
# Triple quotes
if text.endswith('"""') or text.endswith("'''"):
start += 2
end -= 2
return text[:start] + 'x' * (end - start) + text[end:] | [
"def",
"mute_string",
"(",
"text",
")",
":",
"start",
"=",
"1",
"end",
"=",
"len",
"(",
"text",
")",
"-",
"1",
"if",
"text",
".",
"endswith",
"(",
"'\"'",
")",
":",
"start",
"+=",
"text",
".",
"index",
"(",
"'\"'",
")",
"elif",
"text",
".",
"endswith",
"(",
"\"'\"",
")",
":",
"start",
"+=",
"text",
".",
"index",
"(",
"\"'\"",
")",
"if",
"text",
".",
"endswith",
"(",
"'\"\"\"'",
")",
"or",
"text",
".",
"endswith",
"(",
"\"'''\"",
")",
":",
"start",
"+=",
"2",
"end",
"-=",
"2",
"return",
"text",
"[",
":",
"start",
"]",
"+",
"'x'",
"*",
"(",
"end",
"-",
"start",
")",
"+",
"text",
"[",
"end",
":",
"]"
] | python | Replace contents with 'xxx' to prevent syntax matching.
>>> mute_string('"abc"')
'"xxx"'
>>> mute_string("'''abc'''")
"'''xxx'''"
>>> mute_string("r'abc'")
"r'xxx'" | true |
2,628,316 | def input_file(filename):
"""
Run all checks on a Python source file.
"""
if excluded(filename) or not filename_match(filename):
return {}
if options.verbose:
message('checking ' + filename)
options.counters['files'] = options.counters.get('files', 0) + 1
errors = Checker(filename).check_all()
if options.testsuite and not errors:
message("%s: %s" % (filename, "no errors found"))
return errors | [
"def",
"input_file",
"(",
"filename",
")",
":",
"if",
"excluded",
"(",
"filename",
")",
"or",
"not",
"filename_match",
"(",
"filename",
")",
":",
"return",
"{",
"}",
"if",
"options",
".",
"verbose",
":",
"message",
"(",
"'checking '",
"+",
"filename",
")",
"options",
".",
"counters",
"[",
"'files'",
"]",
"=",
"options",
".",
"counters",
".",
"get",
"(",
"'files'",
",",
"0",
")",
"+",
"1",
"errors",
"=",
"Checker",
"(",
"filename",
")",
".",
"check_all",
"(",
")",
"if",
"options",
".",
"testsuite",
"and",
"not",
"errors",
":",
"message",
"(",
"\"%s: %s\"",
"%",
"(",
"filename",
",",
"\"no errors found\"",
")",
")",
"return",
"errors"
] | python | Run all checks on a Python source file. | true |
2,628,320 | def get_statistics(prefix=''):
"""
Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports
"""
stats = []
keys = options.messages.keys()
keys.sort()
for key in keys:
if key.startswith(prefix):
stats.append('%-7s %s %s' %
(options.counters[key], key, options.messages[key]))
return stats | [
"def",
"get_statistics",
"(",
"prefix",
"=",
"''",
")",
":",
"stats",
"=",
"[",
"]",
"keys",
"=",
"options",
".",
"messages",
".",
"keys",
"(",
")",
"keys",
".",
"sort",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
".",
"startswith",
"(",
"prefix",
")",
":",
"stats",
".",
"append",
"(",
"'%-7s %s %s'",
"%",
"(",
"options",
".",
"counters",
"[",
"key",
"]",
",",
"key",
",",
"options",
".",
"messages",
"[",
"key",
"]",
")",
")",
"return",
"stats"
] | python | Get statistics for message codes that start with the prefix.
prefix='' matches all errors and warnings
prefix='E' matches all errors
prefix='W' matches all warnings
prefix='E4' matches all errors that have to do with imports | true |
2,628,321 | def print_benchmark(elapsed):
"""
Print benchmark numbers.
"""
print '%-7.2f %s' % (elapsed, 'seconds elapsed')
keys = ['directories', 'files',
'logical lines', 'physical lines']
for key in keys:
if key in options.counters:
print '%-7d %s per second (%d total)' % (
options.counters[key] / elapsed, key,
options.counters[key]) | [
"def",
"print_benchmark",
"(",
"elapsed",
")",
":",
"print",
"'%-7.2f %s'",
"%",
"(",
"elapsed",
",",
"'seconds elapsed'",
")",
"keys",
"=",
"[",
"'directories'",
",",
"'files'",
",",
"'logical lines'",
",",
"'physical lines'",
"]",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"in",
"options",
".",
"counters",
":",
"print",
"'%-7d %s per second (%d total)'",
"%",
"(",
"options",
".",
"counters",
"[",
"key",
"]",
"/",
"elapsed",
",",
"key",
",",
"options",
".",
"counters",
"[",
"key",
"]",
")"
] | python | Print benchmark numbers. | true |
2,628,323 | def _main():
"""
Parse options and run checks on Python source.
"""
options, args = process_options()
if options.doctest:
import doctest
return doctest.testmod()
start_time = time.time()
errors = 0
for path in args:
if os.path.isdir(path):
errors += input_dir(path)
else:
errors += input_file(path)
elapsed = time.time() - start_time
if options.statistics:
print_statistics()
if options.benchmark:
print_benchmark(elapsed)
return errors > 0 | [
"def",
"_main",
"(",
")",
":",
"options",
",",
"args",
"=",
"process_options",
"(",
")",
"if",
"options",
".",
"doctest",
":",
"import",
"doctest",
"return",
"doctest",
".",
"testmod",
"(",
")",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"errors",
"=",
"0",
"for",
"path",
"in",
"args",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"errors",
"+=",
"input_dir",
"(",
"path",
")",
"else",
":",
"errors",
"+=",
"input_file",
"(",
"path",
")",
"elapsed",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"if",
"options",
".",
"statistics",
":",
"print_statistics",
"(",
")",
"if",
"options",
".",
"benchmark",
":",
"print_benchmark",
"(",
"elapsed",
")",
"return",
"errors",
">",
"0"
] | python | Parse options and run checks on Python source. | true |
2,631,263 | def checksum_file(scheme, path):
"""Return the checksum (hex digest) of a file"""
h = getattr(hashlib, scheme)()
with open(path, 'rb') as f:
chunk = f.read(65535)
while chunk:
h.update(chunk)
chunk = f.read(65535)
return h.hexdigest() | [
"def",
"checksum_file",
"(",
"scheme",
",",
"path",
")",
":",
"h",
"=",
"getattr",
"(",
"hashlib",
",",
"scheme",
")",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"chunk",
"=",
"f",
".",
"read",
"(",
"65535",
")",
"while",
"chunk",
":",
"h",
".",
"update",
"(",
"chunk",
")",
"chunk",
"=",
"f",
".",
"read",
"(",
"65535",
")",
"return",
"h",
".",
"hexdigest",
"(",
")"
] | python | Return the checksum (hex digest) of a file | true |
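A usage sketch, assuming checksum_file() above is in scope (hashlib imported); the temporary file is only there to give it something to hash:

import hashlib
import tempfile

with tempfile.NamedTemporaryFile("wb", delete=False, suffix=".bin") as tmp:
    tmp.write(b"hello world\n")
    path = tmp.name

digest = checksum_file("sha256", path)
print(digest)
print(digest == hashlib.sha256(b"hello world\n").hexdigest())   # True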
2,632,514 | def ae(actual, predicted):
"""
Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted
"""
return np.abs(np.array(actual)-np.array(predicted)) | [
"def",
"ae",
"(",
"actual",
",",
"predicted",
")",
":",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"array",
"(",
"actual",
")",
"-",
"np",
".",
"array",
"(",
"predicted",
")",
")"
] | python | Computes the absolute error.
This function computes the absolute error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The absolute error between actual and predicted | true |
2,632,515 | def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual)) | [
"def",
"ce",
"(",
"actual",
",",
"predicted",
")",
":",
"return",
"(",
"sum",
"(",
"[",
"1.0",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"actual",
",",
"predicted",
")",
"if",
"x",
"!=",
"y",
"]",
")",
"/",
"len",
"(",
"actual",
")",
")"
] | python | Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted | true |
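A tiny worked example, assuming ce() above is in scope:

print(ce(["cat", "dog", "cat"], ["cat", "cat", "cat"]))   # 0.333... -> 1 of 3 labels wrong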
2,632,516 | def se(actual, predicted):
"""
Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted
"""
return np.power(np.array(actual)-np.array(predicted), 2) | [
"def",
"se",
"(",
"actual",
",",
"predicted",
")",
":",
"return",
"np",
".",
"power",
"(",
"np",
".",
"array",
"(",
"actual",
")",
"-",
"np",
".",
"array",
"(",
"predicted",
")",
",",
"2",
")"
] | python | Computes the squared error.
This function computes the squared error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared error between actual and predicted | true |
2,632,517 | def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2)) | [
"def",
"sle",
"(",
"actual",
",",
"predicted",
")",
":",
"return",
"(",
"np",
".",
"power",
"(",
"np",
".",
"log",
"(",
"np",
".",
"array",
"(",
"actual",
")",
"+",
"1",
")",
"-",
"np",
".",
"log",
"(",
"np",
".",
"array",
"(",
"predicted",
")",
"+",
"1",
")",
",",
"2",
")",
")"
] | python | Computes the squared log error.
This function computes the squared log error between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted | true |
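A quick check of sle under the same assumptions (definition above in scope, numpy imported as np); the inputs are chosen so the expected values are easy to verify by hand.

import numpy as np  # required by sle itself

print(sle(np.e - 1, 0.0))           # 1.0, since (log(e) - log(1))**2 == 1
print(sle([1.0, 9.0], [1.0, 9.0]))  # [0. 0.] -- identical inputs give zero error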
2,632,518 | def ll(actual, predicted):
"""
Computes the log likelihood.
This function computes the log likelihood between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted
"""
actual = np.array(actual)
predicted = np.array(predicted)
err = np.seterr(all='ignore')
score = -(actual*np.log(predicted)+(1-actual)*np.log(1-predicted))
np.seterr(divide=err['divide'], over=err['over'],
under=err['under'], invalid=err['invalid'])
if type(score)==np.ndarray:
score[np.isnan(score)] = 0
else:
if np.isnan(score):
score = 0
return score | [
"def",
"ll",
"(",
"actual",
",",
"predicted",
")",
":",
"actual",
"=",
"np",
".",
"array",
"(",
"actual",
")",
"predicted",
"=",
"np",
".",
"array",
"(",
"predicted",
")",
"err",
"=",
"np",
".",
"seterr",
"(",
"all",
"=",
"'ignore'",
")",
"score",
"=",
"-",
"(",
"actual",
"*",
"np",
".",
"log",
"(",
"predicted",
")",
"+",
"(",
"1",
"-",
"actual",
")",
"*",
"np",
".",
"log",
"(",
"1",
"-",
"predicted",
")",
")",
"np",
".",
"seterr",
"(",
"divide",
"=",
"err",
"[",
"'divide'",
"]",
",",
"over",
"=",
"err",
"[",
"'over'",
"]",
",",
"under",
"=",
"err",
"[",
"'under'",
"]",
",",
"invalid",
"=",
"err",
"[",
"'invalid'",
"]",
")",
"if",
"type",
"(",
"score",
")",
"==",
"np",
".",
"ndarray",
":",
"score",
"[",
"np",
".",
"isnan",
"(",
"score",
")",
"]",
"=",
"0",
"else",
":",
"if",
"np",
".",
"isnan",
"(",
"score",
")",
":",
"score",
"=",
"0",
"return",
"score"
] | python | Computes the log likelihood.
This function computes the log likelihood between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The log likelihood error between actual and predicted | true |
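A hedged illustration of ll, assuming the definition above is in scope with numpy imported as np; it shows both the ordinary case and the nan-to-zero clean-up the function performs.

import numpy as np  # required by ll itself

print(ll([1, 0], [0.5, 0.5]))  # [0.693... 0.693...] == -log(0.5) per element
print(ll([1], [1.0]))          # [0.] -- the nan from 0*log(0) is reset to 0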
2,632,827 | def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` times and produces a
key of `keylen` bytes. By default SHA-1 is used as hash function,
a different hashlib `hashfunc` can be provided.
"""
hashfunc = hashfunc or hashlib.sha1
mac = hmac.new(bytes_(data), None, hashfunc)
def _pseudorandom(x, mac=mac):
h = mac.copy()
h.update(bytes_(x))
if not PY2:
return [x for x in h.digest()]
else:
return map(ord, h.digest())
buf = []
for block in range_(1, -(-keylen // mac.digest_size) + 1):
rv = u = _pseudorandom(bytes_(salt) + _pack_int(block))
for i in range_(iterations - 1):
if not PY2:
u = _pseudorandom(bytes(u))
else:
u = _pseudorandom(''.join(map(chr, u)))
rv = starmap(xor, zip(rv, u))
buf.extend(rv)
if not PY2:
return bytes(buf)[:keylen]
else:
return ''.join(map(chr, buf))[:keylen] | [
"def",
"pbkdf2_bin",
"(",
"data",
",",
"salt",
",",
"iterations",
"=",
"1000",
",",
"keylen",
"=",
"24",
",",
"hashfunc",
"=",
"None",
")",
":",
"hashfunc",
"=",
"hashfunc",
"or",
"hashlib",
".",
"sha1",
"mac",
"=",
"hmac",
".",
"new",
"(",
"bytes_",
"(",
"data",
")",
",",
"None",
",",
"hashfunc",
")",
"def",
"_pseudorandom",
"(",
"x",
",",
"mac",
"=",
"mac",
")",
":",
"h",
"=",
"mac",
".",
"copy",
"(",
")",
"h",
".",
"update",
"(",
"bytes_",
"(",
"x",
")",
")",
"if",
"not",
"PY2",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"h",
".",
"digest",
"(",
")",
"]",
"else",
":",
"return",
"map",
"(",
"ord",
",",
"h",
".",
"digest",
"(",
")",
")",
"buf",
"=",
"[",
"]",
"for",
"block",
"in",
"range_",
"(",
"1",
",",
"-",
"(",
"-",
"keylen",
"//",
"mac",
".",
"digest_size",
")",
"+",
"1",
")",
":",
"rv",
"=",
"u",
"=",
"_pseudorandom",
"(",
"bytes_",
"(",
"salt",
")",
"+",
"_pack_int",
"(",
"block",
")",
")",
"for",
"i",
"in",
"range_",
"(",
"iterations",
"-",
"1",
")",
":",
"if",
"not",
"PY2",
":",
"u",
"=",
"_pseudorandom",
"(",
"bytes",
"(",
"u",
")",
")",
"else",
":",
"u",
"=",
"_pseudorandom",
"(",
"''",
".",
"join",
"(",
"map",
"(",
"chr",
",",
"u",
")",
")",
")",
"rv",
"=",
"starmap",
"(",
"xor",
",",
"zip",
"(",
"rv",
",",
"u",
")",
")",
"buf",
".",
"extend",
"(",
"rv",
")",
"if",
"not",
"PY2",
":",
"return",
"bytes",
"(",
"buf",
")",
"[",
":",
"keylen",
"]",
"else",
":",
"return",
"''",
".",
"join",
"(",
"map",
"(",
"chr",
",",
"buf",
")",
")",
"[",
":",
"keylen",
"]"
] | python | Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` times and produces a
key of `keylen` bytes. By default SHA-1 is used as hash function,
a different hashlib `hashfunc` can be provided. | true |
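For byte-string inputs, the pbkdf2_bin helper above should agree with the standard library's hashlib.pbkdf2_hmac (assuming the bytes_/range_ compatibility helpers it references behave as their names suggest); this sketch shows the stdlib call that computes the same derivation with the snippet's defaults (SHA-1, 1000 iterations, 24-byte key).

import hashlib

# same parameters as pbkdf2_bin's defaults: SHA-1, 1000 iterations, keylen=24
key = hashlib.pbkdf2_hmac('sha1', b'password', b'salt', 1000, dklen=24)
print(key.hex())  # 24-byte derived key as hex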
2,632,856 | def get_compiler(compiler, **compiler_attrs):
"""get and customize a compiler"""
if compiler is None or isinstance(compiler, str):
cc = ccompiler.new_compiler(compiler=compiler, verbose=0)
customize_compiler(cc)
if cc.compiler_type == 'mingw32':
customize_mingw(cc)
else:
cc = compiler
customize_gcc(cc)
for name, val in compiler_attrs.items():
setattr(cc, name, val)
return cc | [
"def",
"get_compiler",
"(",
"compiler",
",",
"**",
"compiler_attrs",
")",
":",
"if",
"compiler",
"is",
"None",
"or",
"isinstance",
"(",
"compiler",
",",
"str",
")",
":",
"cc",
"=",
"ccompiler",
".",
"new_compiler",
"(",
"compiler",
"=",
"compiler",
",",
"verbose",
"=",
"0",
")",
"customize_compiler",
"(",
"cc",
")",
"if",
"cc",
".",
"compiler_type",
"==",
"'mingw32'",
":",
"customize_mingw",
"(",
"cc",
")",
"else",
":",
"cc",
"=",
"compiler",
"customize_gcc",
"(",
"cc",
")",
"for",
"name",
",",
"val",
"in",
"compiler_attrs",
".",
"items",
"(",
")",
":",
"setattr",
"(",
"cc",
",",
"name",
",",
"val",
")",
"return",
"cc"
] | python | get and customize a compiler | true |
2,633,359 | def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None) | [
"def",
"make_naive",
"(",
"value",
",",
"timezone",
")",
":",
"value",
"=",
"value",
".",
"astimezone",
"(",
"timezone",
")",
"if",
"hasattr",
"(",
"timezone",
",",
"'normalize'",
")",
":",
"value",
"=",
"timezone",
".",
"normalize",
"(",
"value",
")",
"return",
"value",
".",
"replace",
"(",
"tzinfo",
"=",
"None",
")"
] | python | Makes an aware datetime.datetime naive in a given time zone. | true |
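A usage sketch for make_naive with pytz (assumed installed); the datetime and zone are illustrative only.

import datetime
import pytz

aware = datetime.datetime(2020, 1, 1, 12, 0, tzinfo=pytz.utc)
eastern = pytz.timezone('US/Eastern')
# assuming make_naive from the record above is in scope
print(make_naive(aware, eastern))  # 2020-01-01 07:00:00, tzinfo stripped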
2,633,629 | def print_http_nfc_lease_info(info):
""" Prints information about the lease,
such as the entity covered by the lease,
and HTTP URLs for up/downloading file backings.
:param info:
:type info: vim.HttpNfcLease.Info
:return:
"""
print 'Lease timeout: {0.leaseTimeout}\n' \
'Disk Capacity KB: {0.totalDiskCapacityInKB}'.format(info)
device_number = 1
if info.deviceUrl:
for device_url in info.deviceUrl:
print 'HttpNfcLeaseDeviceUrl: {1}\n' \
'Device URL Import Key: {0.importKey}\n' \
'Device URL Key: {0.key}\n' \
'Device URL: {0.url}\n' \
'Device URL Size: {0.fileSize}\n' \
'SSL Thumbprint: {0.sslThumbprint}\n'.format(device_url,
device_number)
if not device_url.targetId:
print "No targetId found for this device"
print "Device is not eligible for export. This could be a mounted iso or img of some sort"
print "It will NOT be downloaded\n"
device_number += 1
else:
print 'No devices were found.' | [
"def",
"print_http_nfc_lease_info",
"(",
"info",
")",
":",
"print",
"'Lease timeout: {0.leaseTimeout}\\n'",
"'Disk Capacity KB: {0.totalDiskCapacityInKB}'",
".",
"format",
"(",
"info",
")",
"device_number",
"=",
"1",
"if",
"info",
".",
"deviceUrl",
":",
"for",
"device_url",
"in",
"info",
".",
"deviceUrl",
":",
"print",
"'HttpNfcLeaseDeviceUrl: {1}\\n'",
"'Device URL Import Key: {0.importKey}\\n'",
"'Device URL Key: {0.key}\\n'",
"'Device URL: {0.url}\\n'",
"'Device URL Size: {0.fileSize}\\n'",
"'SSL Thumbprint: {0.sslThumbprint}\\n'",
".",
"format",
"(",
"device_url",
",",
"device_number",
")",
"if",
"not",
"device_url",
".",
"targetId",
":",
"print",
"\"No targetId found for this device\"",
"print",
"\"Device is not eligible for export. This could be a mounted iso or img of some sort\"",
"print",
"\"It will NOT be downloaded\\n\"",
"device_number",
"+=",
"1",
"else",
":",
"print",
"'No devices were found.'"
] | python | Prints information about the lease,
such as the entity covered by the lease,
and HTTP URLs for up/downloading file backings.
:param info:
:type info: vim.HttpNfcLease.Info
:return: | true |
2,633,630 | def break_down_cookie(cookie):
""" Breaks down vSphere SOAP cookie
:param cookie: vSphere SOAP cookie
:type cookie: str
:return: Dictionary with cookie_name: cookie_value
"""
cookie_a = cookie.split(';')
cookie_name = cookie_a[0].split('=')[0]
cookie_text = ' {0}; ${1}'.format(cookie_a[0].split('=')[1],
cookie_a[1].lstrip())
return {cookie_name: cookie_text} | [
"def",
"break_down_cookie",
"(",
"cookie",
")",
":",
"cookie_a",
"=",
"cookie",
".",
"split",
"(",
"';'",
")",
"cookie_name",
"=",
"cookie_a",
"[",
"0",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"0",
"]",
"cookie_text",
"=",
"' {0}; ${1}'",
".",
"format",
"(",
"cookie_a",
"[",
"0",
"]",
".",
"split",
"(",
"'='",
")",
"[",
"1",
"]",
",",
"cookie_a",
"[",
"1",
"]",
".",
"lstrip",
"(",
")",
")",
"return",
"{",
"cookie_name",
":",
"cookie_text",
"}"
] | python | Breaks down vSphere SOAP cookie
:param cookie: vSphere SOAP cookie
:type cookie: str
:return: Dictionary with cookie_name: cookie_value | true |
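A small illustration of break_down_cookie, assuming the definition above is in scope; the cookie string is a made-up stand-in for a vSphere SOAP session cookie.

# hypothetical cookie value, shaped like 'name="value"; Path=/; HttpOnly'
cookie = 'vmware_soap_session="abc123"; Path=/; HttpOnly'
print(break_down_cookie(cookie))
# {'vmware_soap_session': ' "abc123"; $Path=/'}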
2,633,631 | def download_device(headers, cookies, temp_target_disk,
device_url, lease_updater,
total_bytes_written, total_bytes_to_write):
""" Download disk device of HttpNfcLease.info.deviceUrl
list of devices
:param headers: Request headers
:type headers: dict
:param cookies: Request cookies (session)
:type cookies: dict
:param temp_target_disk: file name to write
:type temp_target_disk: str
:param device_url: deviceUrl.url
:type device_url: str
:param lease_updater:
:type lease_updater: LeaseProgressUpdater
:param total_bytes_written: Bytes written so far
:type total_bytes_written: long
:param total_bytes_to_write: VM unshared storage
:type total_bytes_to_write: long
:return:
"""
with open(temp_target_disk, 'wb') as handle:
response = requests.get(device_url, stream=True,
headers=headers,
cookies=cookies, verify=False)
# response other than 200
if not response.ok:
response.raise_for_status()
# keeping track of progress
current_bytes_written = 0
written_pct = 0
print "Exporting from vCenter..."
print ""
last_time = 0
for block in response.iter_content(chunk_size=1073741824):
# filter out keep-alive new chunks
if block:
handle.write(block)
handle.flush()
os.fsync(handle.fileno())
prev_pct = written_pct
# Percent is unreliable as i can't find a way to know the size of the disk compressed
current_bytes_written += len(block)
# written_pct_float = (float(current_bytes_written + total_bytes_written) / float(total_bytes_to_write) * 100)
# written_pct = int(written_pct_float)
# Only do the bytes to gb conversion every 5 seconds
if int(time.time()) > last_time + 3:
dl_in_mb = (total_bytes_written + current_bytes_written) /1024 /1024
last_time = int(time.time())
print ("\r {} Mb Downloaded ".format(dl_in_mb)),
# if written_pct > prev_pct:
# lease_updater.progressPercent = int(written_pct)
return current_bytes_written | [
"def",
"download_device",
"(",
"headers",
",",
"cookies",
",",
"temp_target_disk",
",",
"device_url",
",",
"lease_updater",
",",
"total_bytes_written",
",",
"total_bytes_to_write",
")",
":",
"with",
"open",
"(",
"temp_target_disk",
",",
"'wb'",
")",
"as",
"handle",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"device_url",
",",
"stream",
"=",
"True",
",",
"headers",
"=",
"headers",
",",
"cookies",
"=",
"cookies",
",",
"verify",
"=",
"False",
")",
"if",
"not",
"response",
".",
"ok",
":",
"response",
".",
"raise_for_status",
"(",
")",
"current_bytes_written",
"=",
"0",
"written_pct",
"=",
"0",
"print",
"\"Exporting from vCenter...\"",
"print",
"\"\"",
"last_time",
"=",
"0",
"for",
"block",
"in",
"response",
".",
"iter_content",
"(",
"chunk_size",
"=",
"1073741824",
")",
":",
"if",
"block",
":",
"handle",
".",
"write",
"(",
"block",
")",
"handle",
".",
"flush",
"(",
")",
"os",
".",
"fsync",
"(",
"handle",
".",
"fileno",
"(",
")",
")",
"prev_pct",
"=",
"written_pct",
"current_bytes_written",
"+=",
"len",
"(",
"block",
")",
"if",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
">",
"last_time",
"+",
"3",
":",
"dl_in_mb",
"=",
"(",
"total_bytes_written",
"+",
"current_bytes_written",
")",
"/",
"1024",
"/",
"1024",
"last_time",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"print",
"(",
"\"\\r {} Mb Downloaded \"",
".",
"format",
"(",
"dl_in_mb",
")",
")",
",",
"return",
"current_bytes_written"
] | python | Download disk device of HttpNfcLease.info.deviceUrl
list of devices
:param headers: Request headers
:type headers: dict
:param cookies: Request cookies (session)
:type cookies: dict
:param temp_target_disk: file name to write
:type temp_target_disk: str
:param device_url: deviceUrl.url
:type device_url: str
:param lease_updater:
:type lease_updater: LeaseProgressUpdater
:param total_bytes_written: Bytes written so far
:type total_bytes_written: long
:param total_bytes_to_write: VM unshared storage
:type total_bytes_to_write: long
:return: | true |
2,634,740 | def extend_config(config, config_items):
"""
We are handling config value setting like this for a cleaner api.
Users just need to pass in a named param to this source and we can
dynamically generate a config object for it.
"""
for key, val in list(config_items.items()):
if hasattr(config, key):
setattr(config, key, val)
return config | [
"def",
"extend_config",
"(",
"config",
",",
"config_items",
")",
":",
"for",
"key",
",",
"val",
"in",
"list",
"(",
"config_items",
".",
"items",
"(",
")",
")",
":",
"if",
"hasattr",
"(",
"config",
",",
"key",
")",
":",
"setattr",
"(",
"config",
",",
"key",
",",
"val",
")",
"return",
"config"
] | python | We are handling config value setting like this for a cleaner api.
Users just need to pass in a named param to this source and we can
dynamically generate a config object for it. | true |
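A minimal sketch of extend_config with a hypothetical Config class (not part of the original); it shows that only attributes already present on the config object are overridden.

class Config(object):  # hypothetical config holder for illustration
    host = 'localhost'
    port = 8080

# assuming extend_config from the record above is in scope
cfg = extend_config(Config(), {'port': 9000, 'unknown': True})
print(cfg.port)                 # 9000
print(hasattr(cfg, 'unknown'))  # False -- unrecognized keys are ignored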
2,636,476 | def fetch_archive(savedir, url, fname, checksum, force=False):
"""download an archive to a specific location"""
dest = pjoin(savedir, fname)
scheme, digest_ref = checksum.split(':')
if os.path.exists(dest) and not force:
info("already have %s" % dest)
digest = checksum_file(scheme, fname)
if digest == digest_ref:
return dest
else:
warn("but checksum %s != %s, redownloading." % (digest, digest_ref))
os.remove(fname)
info("fetching %s into %s" % (url, savedir))
if not os.path.exists(savedir):
os.makedirs(savedir)
req = urlopen(url)
with open(dest, 'wb') as f:
f.write(req.read())
digest = checksum_file(scheme, dest)
if digest != digest_ref:
fatal("%s %s mismatch:\nExpected: %s\nActual : %s" % (
dest, scheme, digest_ref, digest))
return dest | [
"def",
"fetch_archive",
"(",
"savedir",
",",
"url",
",",
"fname",
",",
"checksum",
",",
"force",
"=",
"False",
")",
":",
"dest",
"=",
"pjoin",
"(",
"savedir",
",",
"fname",
")",
"scheme",
",",
"digest_ref",
"=",
"checksum",
".",
"split",
"(",
"':'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
"and",
"not",
"force",
":",
"info",
"(",
"\"already have %s\"",
"%",
"dest",
")",
"digest",
"=",
"checksum_file",
"(",
"scheme",
",",
"fname",
")",
"if",
"digest",
"==",
"digest_ref",
":",
"return",
"dest",
"else",
":",
"warn",
"(",
"\"but checksum %s != %s, redownloading.\"",
"%",
"(",
"digest",
",",
"digest_ref",
")",
")",
"os",
".",
"remove",
"(",
"fname",
")",
"info",
"(",
"\"fetching %s into %s\"",
"%",
"(",
"url",
",",
"savedir",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"savedir",
")",
":",
"os",
".",
"makedirs",
"(",
"savedir",
")",
"req",
"=",
"urlopen",
"(",
"url",
")",
"with",
"open",
"(",
"dest",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"req",
".",
"read",
"(",
")",
")",
"digest",
"=",
"checksum_file",
"(",
"scheme",
",",
"dest",
")",
"if",
"digest",
"!=",
"digest_ref",
":",
"fatal",
"(",
"\"%s %s mismatch:\\nExpected: %s\\nActual : %s\"",
"%",
"(",
"dest",
",",
"scheme",
",",
"digest_ref",
",",
"digest",
")",
")",
"return",
"dest"
] | python | download an archive to a specific location | true |
2,636,477 | def fetch_libzmq(savedir):
"""download and extract libzmq"""
dest = pjoin(savedir, 'zeromq')
if os.path.exists(dest):
info("already have %s" % dest)
return
path = fetch_archive(savedir, libzmq_url, fname=libzmq, checksum=libzmq_checksum)
tf = tarfile.open(path)
with_version = pjoin(savedir, tf.firstmember.path)
tf.extractall(savedir)
tf.close()
# remove version suffix:
shutil.move(with_version, dest) | [
"def",
"fetch_libzmq",
"(",
"savedir",
")",
":",
"dest",
"=",
"pjoin",
"(",
"savedir",
",",
"'zeromq'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"dest",
")",
":",
"info",
"(",
"\"already have %s\"",
"%",
"dest",
")",
"return",
"path",
"=",
"fetch_archive",
"(",
"savedir",
",",
"libzmq_url",
",",
"fname",
"=",
"libzmq",
",",
"checksum",
"=",
"libzmq_checksum",
")",
"tf",
"=",
"tarfile",
".",
"open",
"(",
"path",
")",
"with_version",
"=",
"pjoin",
"(",
"savedir",
",",
"tf",
".",
"firstmember",
".",
"path",
")",
"tf",
".",
"extractall",
"(",
"savedir",
")",
"tf",
".",
"close",
"(",
")",
"shutil",
".",
"move",
"(",
"with_version",
",",
"dest",
")"
] | python | download and extract libzmq | true |
2,637,322 | def validipaddr(address):
"""
Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False
"""
try:
octets = address.split('.')
if len(octets) != 4:
return False
for x in octets:
if not (0 <= int(x) <= 255):
return False
except ValueError:
return False
return True | [
"def",
"validipaddr",
"(",
"address",
")",
":",
"try",
":",
"octets",
"=",
"address",
".",
"split",
"(",
"'.'",
")",
"if",
"len",
"(",
"octets",
")",
"!=",
"4",
":",
"return",
"False",
"for",
"x",
"in",
"octets",
":",
"if",
"not",
"(",
"0",
"<=",
"int",
"(",
"x",
")",
"<=",
"255",
")",
":",
"return",
"False",
"except",
"ValueError",
":",
"return",
"False",
"return",
"True"
] | python | Returns True if `address` is a valid IPv4 address.
>>> validipaddr('192.168.1.1')
True
>>> validipaddr('192.168.1.800')
False
>>> validipaddr('192.168.1')
False | true |
2,637,323 | def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
"""Returns `(ip_address, port)` from string `ip_addr_port`"""
addr = defaultaddr
port = defaultport
ip = ip.split(":", 1)
if len(ip) == 1:
if not ip[0]:
pass
elif validipaddr(ip[0]):
addr = ip[0]
elif validipport(ip[0]):
port = int(ip[0])
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
elif len(ip) == 2:
addr, port = ip
if not validipaddr(addr) and validipport(port):
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
port = int(port)
else:
raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
return (addr, port) | [
"def",
"validip",
"(",
"ip",
",",
"defaultaddr",
"=",
"\"0.0.0.0\"",
",",
"defaultport",
"=",
"8080",
")",
":",
"addr",
"=",
"defaultaddr",
"port",
"=",
"defaultport",
"ip",
"=",
"ip",
".",
"split",
"(",
"\":\"",
",",
"1",
")",
"if",
"len",
"(",
"ip",
")",
"==",
"1",
":",
"if",
"not",
"ip",
"[",
"0",
"]",
":",
"pass",
"elif",
"validipaddr",
"(",
"ip",
"[",
"0",
"]",
")",
":",
"addr",
"=",
"ip",
"[",
"0",
"]",
"elif",
"validipport",
"(",
"ip",
"[",
"0",
"]",
")",
":",
"port",
"=",
"int",
"(",
"ip",
"[",
"0",
"]",
")",
"else",
":",
"raise",
"ValueError",
",",
"':'",
".",
"join",
"(",
"ip",
")",
"+",
"' is not a valid IP address/port'",
"elif",
"len",
"(",
"ip",
")",
"==",
"2",
":",
"addr",
",",
"port",
"=",
"ip",
"if",
"not",
"validipaddr",
"(",
"addr",
")",
"and",
"validipport",
"(",
"port",
")",
":",
"raise",
"ValueError",
",",
"':'",
".",
"join",
"(",
"ip",
")",
"+",
"' is not a valid IP address/port'",
"port",
"=",
"int",
"(",
"port",
")",
"else",
":",
"raise",
"ValueError",
",",
"':'",
".",
"join",
"(",
"ip",
")",
"+",
"' is not a valid IP address/port'",
"return",
"(",
"addr",
",",
"port",
")"
] | python | Returns `(ip_address, port)` from string `ip_addr_port` | true |
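A usage sketch for validip, assuming the snippet above (which uses Python 2 raise syntax) and its validipport helper -- referenced but not shown here -- are in scope.

print(validip('192.168.1.5:9000'))  # ('192.168.1.5', 9000)
print(validip('9000'))              # ('0.0.0.0', 9000) -- bare port, default address
print(validip(''))                  # ('0.0.0.0', 8080) -- both defaults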
2,637,324 | def urlquote(val):
"""
Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD'
"""
if val is None: return ''
if not isinstance(val, unicode): val = str(val)
else: val = val.encode('utf-8')
return urllib.quote(val) | [
"def",
"urlquote",
"(",
"val",
")",
":",
"if",
"val",
"is",
"None",
":",
"return",
"''",
"if",
"not",
"isinstance",
"(",
"val",
",",
"unicode",
")",
":",
"val",
"=",
"str",
"(",
"val",
")",
"else",
":",
"val",
"=",
"val",
".",
"encode",
"(",
"'utf-8'",
")",
"return",
"urllib",
".",
"quote",
"(",
"val",
")"
] | python | Quotes a string for use in a URL.
>>> urlquote('://?f=1&j=1')
'%3A//%3Ff%3D1%26j%3D1'
>>> urlquote(None)
''
>>> urlquote(u'\u203d')
'%E2%80%BD' | true |
2,637,325 | def parsehttpdate(string_):
"""
Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1)
"""
try:
t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
return None
return datetime.datetime(*t[:6]) | [
"def",
"parsehttpdate",
"(",
"string_",
")",
":",
"try",
":",
"t",
"=",
"time",
".",
"strptime",
"(",
"string_",
",",
"\"%a, %d %b %Y %H:%M:%S %Z\"",
")",
"except",
"ValueError",
":",
"return",
"None",
"return",
"datetime",
".",
"datetime",
"(",
"*",
"t",
"[",
":",
"6",
"]",
")"
] | python | Parses an HTTP date into a datetime object.
>>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
datetime.datetime(1970, 1, 1, 1, 1, 1) | true |
2,639,790 | def import_dotted_path(path):
"""
Takes a dotted path to a member name in a module, and returns
the member after importing it.
"""
# stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)
try:
module_path, member_name = path.rsplit(".", 1)
module = import_module(module_path)
return getattr(module, member_name)
except (ValueError, ImportError, AttributeError) as e:
raise ImportError('Could not import the name: {}: {}'.format(path, e)) | [
"def",
"import_dotted_path",
"(",
"path",
")",
":",
"try",
":",
"module_path",
",",
"member_name",
"=",
"path",
".",
"rsplit",
"(",
"\".\"",
",",
"1",
")",
"module",
"=",
"import_module",
"(",
"module_path",
")",
"return",
"getattr",
"(",
"module",
",",
"member_name",
")",
"except",
"(",
"ValueError",
",",
"ImportError",
",",
"AttributeError",
")",
"as",
"e",
":",
"raise",
"ImportError",
"(",
"'Could not import the name: {}: {}'",
".",
"format",
"(",
"path",
",",
"e",
")",
")"
] | python | Takes a dotted path to a member name in a module, and returns
the member after importing it. | true |
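A quick example of import_dotted_path, assuming the definition above is in scope along with import_module (presumably importlib.import_module in the original module).

join = import_dotted_path('os.path.join')
print(join('a', 'b'))  # 'a/b' on POSIX, 'a\\b' on Windows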
2,642,253 | def normalize_excludes(rootpath, excludes):
"""Normalize the excluded directory list."""
return [path.normpath(path.abspath(exclude)) for exclude in excludes] | [
"def",
"normalize_excludes",
"(",
"rootpath",
",",
"excludes",
")",
":",
"return",
"[",
"path",
".",
"normpath",
"(",
"path",
".",
"abspath",
"(",
"exclude",
")",
")",
"for",
"exclude",
"in",
"excludes",
"]"
] | python | Normalize the excluded directory list. | true |
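A brief sketch of normalize_excludes, assuming the definition above is in scope and that path refers to os.path (as the call sites suggest); note that relative excludes resolve against the current working directory, since rootpath is not used.

from os import path  # normalize_excludes expects `path` to be os.path

# assuming normalize_excludes from the record above is in scope
print(normalize_excludes('/project', ['docs/', './build/../dist']))
# prints absolute, normalized paths resolved against the current working directory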