id_within_dataset (int64, 46 – 2.71M) | snippet (string, 63 – 481k chars) | tokens (sequence, 20 – 15.6k items) | language (string, 2 values) | nl (string, 1 – 32.4k chars) | is_duplicated (bool, 2 classes)
---|---|---|---|---|---|
2,465,451 | def uninstallation_paths(dist):
"""
Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc.
"""
from pip.utils import FakeFile # circular import
r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD')))
for row in r:
path = os.path.join(dist.location, row[0])
yield path
if path.endswith('.py'):
dn, fn = os.path.split(path)
base = fn[:-3]
path = os.path.join(dn, base + '.pyc')
yield path | [
"def",
"uninstallation_paths",
"(",
"dist",
")",
":",
"from",
"pip",
".",
"utils",
"import",
"FakeFile",
"r",
"=",
"csv",
".",
"reader",
"(",
"FakeFile",
"(",
"dist",
".",
"get_metadata_lines",
"(",
"'RECORD'",
")",
")",
")",
"for",
"row",
"in",
"r",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dist",
".",
"location",
",",
"row",
"[",
"0",
"]",
")",
"yield",
"path",
"if",
"path",
".",
"endswith",
"(",
"'.py'",
")",
":",
"dn",
",",
"fn",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
")",
"base",
"=",
"fn",
"[",
":",
"-",
"3",
"]",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dn",
",",
"base",
"+",
"'.pyc'",
")",
"yield",
"path"
] | python | Yield all the uninstallation paths for dist based on RECORD-without-.pyc
Yield paths to all the files in RECORD. For each .py file in RECORD, add
the .pyc in the same directory.
UninstallPathSet.add() takes care of the __pycache__ .pyc. | true |
2,465,550 | def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, basestring):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name | [
"def",
"iter_symbols",
"(",
"code",
")",
":",
"for",
"name",
"in",
"code",
".",
"co_names",
":",
"yield",
"name",
"for",
"const",
"in",
"code",
".",
"co_consts",
":",
"if",
"isinstance",
"(",
"const",
",",
"basestring",
")",
":",
"yield",
"const",
"elif",
"isinstance",
"(",
"const",
",",
"CodeType",
")",
":",
"for",
"name",
"in",
"iter_symbols",
"(",
"const",
")",
":",
"yield",
"name"
] | python | Yield names and strings used by `code` and its nested code objects | true |
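The snippet above targets Python 2 (note `basestring`). A Python 3 adaptation of the same recursive traversal, exercised on a small compiled module, might look like the sketch below; it is an illustration, not the setuptools original.

```python
from types import CodeType

def iter_symbols(code):
    # Yield names and string constants from a code object and any nested code objects.
    for name in code.co_names:
        yield name
    for const in code.co_consts:
        if isinstance(const, str):
            yield const
        elif isinstance(const, CodeType):
            yield from iter_symbols(const)

code = compile("import os\n\ndef f():\n    return os.path.join('a', 'b')\n", "<example>", "exec")
print(sorted(set(iter_symbols(code))))
```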
2,465,691 | def write_delete_marker_file(directory):
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE) | [
"def",
"write_delete_marker_file",
"(",
"directory",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"PIP_DELETE_MARKER_FILENAME",
")",
"with",
"open",
"(",
"filepath",
",",
"'w'",
")",
"as",
"marker_fp",
":",
"marker_fp",
".",
"write",
"(",
"DELETE_MARKER_MESSAGE",
")"
] | python | Write the pip delete marker file into this directory. | true |
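A self-contained version of the row above; the two module-level constants live in pip's internals, so placeholder values are used here.

```python
import os
import tempfile

# Placeholder stand-ins for pip's real constants.
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
DELETE_MARKER_MESSAGE = 'This directory is safe to delete.'

def write_delete_marker_file(directory):
    # Drop the marker file into the given directory.
    filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
    with open(filepath, 'w') as marker_fp:
        marker_fp.write(DELETE_MARKER_MESSAGE)

build_dir = tempfile.mkdtemp()
write_delete_marker_file(build_dir)
print(os.listdir(build_dir))
```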
2,465,775 | def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower() == 'arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn) | [
"def",
"get_win_launcher",
"(",
"type",
")",
":",
"launcher_fn",
"=",
"'%s.exe'",
"%",
"type",
"if",
"platform",
".",
"machine",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'arm'",
":",
"launcher_fn",
"=",
"launcher_fn",
".",
"replace",
"(",
"\".\"",
",",
"\"-arm.\"",
")",
"if",
"is_64bit",
"(",
")",
":",
"launcher_fn",
"=",
"launcher_fn",
".",
"replace",
"(",
"\".\"",
",",
"\"-64.\"",
")",
"else",
":",
"launcher_fn",
"=",
"launcher_fn",
".",
"replace",
"(",
"\".\"",
",",
"\"-32.\"",
")",
"return",
"resource_string",
"(",
"'setuptools'",
",",
"launcher_fn",
")"
] | python | Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string. | true |
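Only the filename-derivation logic from the row above is reproduced in the sketch below, with `platform.machine()` and `is_64bit()` replaced by plain parameters so it runs anywhere; loading the actual launcher bytes still requires `pkg_resources.resource_string`.

```python
def launcher_filename(kind, machine, is_64bit):
    # Mirror the name mangling: base name, optional -arm, then a -64/-32 suffix.
    name = '%s.exe' % kind
    if machine.lower() == 'arm':
        name = name.replace(".", "-arm.")
    if is_64bit:
        name = name.replace(".", "-64.")
    else:
        name = name.replace(".", "-32.")
    return name

print(launcher_filename('cli', 'AMD64', True))   # cli-64.exe
print(launcher_filename('gui', 'arm', False))    # gui-arm-32.exe
```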
2,466,300 | def get_default_cache():
"""Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs".
"""
try:
return os.environ['PYTHON_EGG_CACHE']
except KeyError:
pass
if os.name!='nt':
return os.path.expanduser('~/.python-eggs')
# XXX this may be locale-specific!
app_data = 'Application Data'
app_homes = [
# best option, should be locale-safe
(('APPDATA',), None),
(('USERPROFILE',), app_data),
(('HOMEDRIVE','HOMEPATH'), app_data),
(('HOMEPATH',), app_data),
(('HOME',), None),
# 95/98/ME
(('WINDIR',), app_data),
]
for keys, subdir in app_homes:
dirname = ''
for key in keys:
if key in os.environ:
dirname = os.path.join(dirname, os.environ[key])
else:
break
else:
if subdir:
dirname = os.path.join(dirname, subdir)
return os.path.join(dirname, 'Python-Eggs')
else:
raise RuntimeError(
"Please set the PYTHON_EGG_CACHE enviroment variable"
) | [
"def",
"get_default_cache",
"(",
")",
":",
"try",
":",
"return",
"os",
".",
"environ",
"[",
"'PYTHON_EGG_CACHE'",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"os",
".",
"name",
"!=",
"'nt'",
":",
"return",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/.python-eggs'",
")",
"app_data",
"=",
"'Application Data'",
"app_homes",
"=",
"[",
"(",
"(",
"'APPDATA'",
",",
")",
",",
"None",
")",
",",
"(",
"(",
"'USERPROFILE'",
",",
")",
",",
"app_data",
")",
",",
"(",
"(",
"'HOMEDRIVE'",
",",
"'HOMEPATH'",
")",
",",
"app_data",
")",
",",
"(",
"(",
"'HOMEPATH'",
",",
")",
",",
"app_data",
")",
",",
"(",
"(",
"'HOME'",
",",
")",
",",
"None",
")",
",",
"(",
"(",
"'WINDIR'",
",",
")",
",",
"app_data",
")",
",",
"]",
"for",
"keys",
",",
"subdir",
"in",
"app_homes",
":",
"dirname",
"=",
"''",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"in",
"os",
".",
"environ",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"os",
".",
"environ",
"[",
"key",
"]",
")",
"else",
":",
"break",
"else",
":",
"if",
"subdir",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"subdir",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"dirname",
",",
"'Python-Eggs'",
")",
"else",
":",
"raise",
"RuntimeError",
"(",
"\"Please set the PYTHON_EGG_CACHE enviroment variable\"",
")"
] | python | Determine the default cache location
This returns the ``PYTHON_EGG_CACHE`` environment variable, if set.
Otherwise, on Windows, it returns a "Python-Eggs" subdirectory of the
"Application Data" directory. On all other systems, it's "~/.python-eggs". | true |
2,466,301 | def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir('/'):
if subitem.endswith('.egg'):
subpath = os.path.join(path_item, subitem)
for dist in find_eggs_in_zip(zipimport.zipimporter(subpath), subpath):
yield dist | [
"def",
"find_eggs_in_zip",
"(",
"importer",
",",
"path_item",
",",
"only",
"=",
"False",
")",
":",
"if",
"importer",
".",
"archive",
".",
"endswith",
"(",
"'.whl'",
")",
":",
"return",
"metadata",
"=",
"EggMetadata",
"(",
"importer",
")",
"if",
"metadata",
".",
"has_metadata",
"(",
"'PKG-INFO'",
")",
":",
"yield",
"Distribution",
".",
"from_filename",
"(",
"path_item",
",",
"metadata",
"=",
"metadata",
")",
"if",
"only",
":",
"return",
"for",
"subitem",
"in",
"metadata",
".",
"resource_listdir",
"(",
"'/'",
")",
":",
"if",
"subitem",
".",
"endswith",
"(",
"'.egg'",
")",
":",
"subpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path_item",
",",
"subitem",
")",
"for",
"dist",
"in",
"find_eggs_in_zip",
"(",
"zipimport",
".",
"zipimporter",
"(",
"subpath",
")",
",",
"subpath",
")",
":",
"yield",
"dist"
] | python | Find eggs in zip files; possibly multiple nested eggs. | true |
2,466,303 | def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path, parent = sys.path, None
if '.' in packageName:
parent = '.'.join(packageName.split('.')[:-1])
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent,[]).append(packageName)
_namespace_packages.setdefault(packageName,[])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
imp.release_lock() | [
"def",
"declare_namespace",
"(",
"packageName",
")",
":",
"imp",
".",
"acquire_lock",
"(",
")",
"try",
":",
"if",
"packageName",
"in",
"_namespace_packages",
":",
"return",
"path",
",",
"parent",
"=",
"sys",
".",
"path",
",",
"None",
"if",
"'.'",
"in",
"packageName",
":",
"parent",
"=",
"'.'",
".",
"join",
"(",
"packageName",
".",
"split",
"(",
"'.'",
")",
"[",
":",
"-",
"1",
"]",
")",
"declare_namespace",
"(",
"parent",
")",
"if",
"parent",
"not",
"in",
"_namespace_packages",
":",
"__import__",
"(",
"parent",
")",
"try",
":",
"path",
"=",
"sys",
".",
"modules",
"[",
"parent",
"]",
".",
"__path__",
"except",
"AttributeError",
":",
"raise",
"TypeError",
"(",
"\"Not a package:\"",
",",
"parent",
")",
"_namespace_packages",
".",
"setdefault",
"(",
"parent",
",",
"[",
"]",
")",
".",
"append",
"(",
"packageName",
")",
"_namespace_packages",
".",
"setdefault",
"(",
"packageName",
",",
"[",
"]",
")",
"for",
"path_item",
"in",
"path",
":",
"_handle_ns",
"(",
"packageName",
",",
"path_item",
")",
"finally",
":",
"imp",
".",
"release_lock",
"(",
")"
] | python | Declare that package 'packageName' is a namespace package | true |
2,466,306 | def _get_mro(cls):
"""Get an mro for a type or classic class"""
if not isinstance(cls, type):
class cls(cls, object): pass
return cls.__mro__[1:]
return cls.__mro__ | [
"def",
"_get_mro",
"(",
"cls",
")",
":",
"if",
"not",
"isinstance",
"(",
"cls",
",",
"type",
")",
":",
"class",
"cls",
"(",
"cls",
",",
"object",
")",
":",
"pass",
"return",
"cls",
".",
"__mro__",
"[",
"1",
":",
"]",
"return",
"cls",
".",
"__mro__"
] | python | Get an mro for a type or classic class | true |
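The classic-class branch only matters on Python 2; on Python 3 every class is a `type`, so the helper simply returns `__mro__`, as this standalone sketch shows.

```python
def _get_mro(cls):
    # On Python 3 all classes are new-style, so this just returns the MRO tuple.
    if not isinstance(cls, type):
        class cls(cls, object):
            pass
        return cls.__mro__[1:]
    return cls.__mro__

class Base: pass
class Child(Base): pass

print(_get_mro(Child))  # roughly: (Child, Base, object)
```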
2,466,307 | def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
for t in _get_mro(getattr(ob, '__class__', type(ob))):
if t in registry:
return registry[t] | [
"def",
"_find_adapter",
"(",
"registry",
",",
"ob",
")",
":",
"for",
"t",
"in",
"_get_mro",
"(",
"getattr",
"(",
"ob",
",",
"'__class__'",
",",
"type",
"(",
"ob",
")",
")",
")",
":",
"if",
"t",
"in",
"registry",
":",
"return",
"registry",
"[",
"t",
"]"
] | python | Return an adapter factory for `ob` from `registry` | true |
2,466,308 | def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname) | [
"def",
"ensure_directory",
"(",
"path",
")",
":",
"dirname",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"dirname",
")",
":",
"os",
".",
"makedirs",
"(",
"dirname",
")"
] | python | Ensure that the parent directory of `path` exists | true |
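The helper from the row above, exercised standalone; on Python 3 the same effect could also be had with `os.makedirs(dirname, exist_ok=True)`.

```python
import os
import tempfile

def ensure_directory(path):
    # Create the parent directory of `path` if it does not already exist.
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

target = os.path.join(tempfile.mkdtemp(), 'nested', 'dirs', 'file.txt')
ensure_directory(target)
print(os.path.isdir(os.path.dirname(target)))  # True
```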
2,466,438 | def _create_modulename(cdef_sources, source, sys_version):
"""
This is the same as CFFI's create modulename except we don't include the
CFFI version.
"""
key = '\x00'.join([sys_version[:3], source, cdef_sources])
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
return '_xprintidle_cffi_{0}{1}'.format(k1, k2) | [
"def",
"_create_modulename",
"(",
"cdef_sources",
",",
"source",
",",
"sys_version",
")",
":",
"key",
"=",
"'\\x00'",
".",
"join",
"(",
"[",
"sys_version",
"[",
":",
"3",
"]",
",",
"source",
",",
"cdef_sources",
"]",
")",
"key",
"=",
"key",
".",
"encode",
"(",
"'utf-8'",
")",
"k1",
"=",
"hex",
"(",
"binascii",
".",
"crc32",
"(",
"key",
"[",
"0",
":",
":",
"2",
"]",
")",
"&",
"0xffffffff",
")",
"k1",
"=",
"k1",
".",
"lstrip",
"(",
"'0x'",
")",
".",
"rstrip",
"(",
"'L'",
")",
"k2",
"=",
"hex",
"(",
"binascii",
".",
"crc32",
"(",
"key",
"[",
"1",
":",
":",
"2",
"]",
")",
"&",
"0xffffffff",
")",
"k2",
"=",
"k2",
".",
"lstrip",
"(",
"'0'",
")",
".",
"rstrip",
"(",
"'L'",
")",
"return",
"'_xprintidle_cffi_{0}{1}'",
".",
"format",
"(",
"k1",
",",
"k2",
")"
] | python | This is the same as CFFI's create modulename except we don't include the
CFFI version. | true |
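A runnable copy of the CRC32-based naming scheme from the row above; the cdef and source strings passed in at the bottom are placeholders, not real xprintidle inputs.

```python
import binascii
import sys

def _create_modulename(cdef_sources, source, sys_version):
    # Hash the version prefix plus both source strings into two CRC32 halves.
    key = '\x00'.join([sys_version[:3], source, cdef_sources])
    key = key.encode('utf-8')
    k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff).lstrip('0x').rstrip('L')
    k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff).lstrip('0').rstrip('L')
    return '_xprintidle_cffi_{0}{1}'.format(k1, k2)

print(_create_modulename('int foo(void);', '#include <stdio.h>', sys.version))
```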
2,466,454 | def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
def scan_list(ITEM, TERMINATOR, line, p, groups, item_name):
items = []
while not TERMINATOR(line, p):
if CONTINUE(line, p):
try:
line = next(lines)
p = 0
except StopIteration:
msg = "\\ must not appear on the last nonblank line"
raise RequirementParseError(msg)
match = ITEM(line, p)
if not match:
msg = "Expected " + item_name + " in"
raise RequirementParseError(msg, line, "at", line[p:])
items.append(match.group(*groups))
p = match.end()
match = COMMA(line, p)
if match:
# skip the comma
p = match.end()
elif not TERMINATOR(line, p):
msg = "Expected ',' or end-of-list in"
raise RequirementParseError(msg, line, "at", line[p:])
match = TERMINATOR(line, p)
# skip the terminator, if any
if match:
p = match.end()
return line, p, items
for line in lines:
match = DISTRO(line)
if not match:
raise RequirementParseError("Missing distribution spec", line)
project_name = match.group(1)
p = match.end()
extras = []
match = OBRACKET(line, p)
if match:
p = match.end()
line, p, extras = scan_list(
DISTRO, CBRACKET, line, p, (1,), "'extra' name"
)
line, p, specs = scan_list(VERSION, LINE_END, line, p, (1, 2),
"version spec")
specs = [(op, val) for op, val in specs]
yield Requirement(project_name, specs, extras) | [
"def",
"parse_requirements",
"(",
"strs",
")",
":",
"lines",
"=",
"iter",
"(",
"yield_lines",
"(",
"strs",
")",
")",
"def",
"scan_list",
"(",
"ITEM",
",",
"TERMINATOR",
",",
"line",
",",
"p",
",",
"groups",
",",
"item_name",
")",
":",
"items",
"=",
"[",
"]",
"while",
"not",
"TERMINATOR",
"(",
"line",
",",
"p",
")",
":",
"if",
"CONTINUE",
"(",
"line",
",",
"p",
")",
":",
"try",
":",
"line",
"=",
"next",
"(",
"lines",
")",
"p",
"=",
"0",
"except",
"StopIteration",
":",
"msg",
"=",
"\"\\\\ must not appear on the last nonblank line\"",
"raise",
"RequirementParseError",
"(",
"msg",
")",
"match",
"=",
"ITEM",
"(",
"line",
",",
"p",
")",
"if",
"not",
"match",
":",
"msg",
"=",
"\"Expected \"",
"+",
"item_name",
"+",
"\" in\"",
"raise",
"RequirementParseError",
"(",
"msg",
",",
"line",
",",
"\"at\"",
",",
"line",
"[",
"p",
":",
"]",
")",
"items",
".",
"append",
"(",
"match",
".",
"group",
"(",
"*",
"groups",
")",
")",
"p",
"=",
"match",
".",
"end",
"(",
")",
"match",
"=",
"COMMA",
"(",
"line",
",",
"p",
")",
"if",
"match",
":",
"p",
"=",
"match",
".",
"end",
"(",
")",
"elif",
"not",
"TERMINATOR",
"(",
"line",
",",
"p",
")",
":",
"msg",
"=",
"\"Expected ',' or end-of-list in\"",
"raise",
"RequirementParseError",
"(",
"msg",
",",
"line",
",",
"\"at\"",
",",
"line",
"[",
"p",
":",
"]",
")",
"match",
"=",
"TERMINATOR",
"(",
"line",
",",
"p",
")",
"if",
"match",
":",
"p",
"=",
"match",
".",
"end",
"(",
")",
"return",
"line",
",",
"p",
",",
"items",
"for",
"line",
"in",
"lines",
":",
"match",
"=",
"DISTRO",
"(",
"line",
")",
"if",
"not",
"match",
":",
"raise",
"RequirementParseError",
"(",
"\"Missing distribution spec\"",
",",
"line",
")",
"project_name",
"=",
"match",
".",
"group",
"(",
"1",
")",
"p",
"=",
"match",
".",
"end",
"(",
")",
"extras",
"=",
"[",
"]",
"match",
"=",
"OBRACKET",
"(",
"line",
",",
"p",
")",
"if",
"match",
":",
"p",
"=",
"match",
".",
"end",
"(",
")",
"line",
",",
"p",
",",
"extras",
"=",
"scan_list",
"(",
"DISTRO",
",",
"CBRACKET",
",",
"line",
",",
"p",
",",
"(",
"1",
",",
")",
",",
"\"'extra' name\"",
")",
"line",
",",
"p",
",",
"specs",
"=",
"scan_list",
"(",
"VERSION",
",",
"LINE_END",
",",
"line",
",",
"p",
",",
"(",
"1",
",",
"2",
")",
",",
"\"version spec\"",
")",
"specs",
"=",
"[",
"(",
"op",
",",
"val",
")",
"for",
"op",
",",
"val",
"in",
"specs",
"]",
"yield",
"Requirement",
"(",
"project_name",
",",
"specs",
",",
"extras",
")"
] | python | Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof. | true |
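The regex helpers used above (`DISTRO`, `OBRACKET`, `VERSION`, ...) live elsewhere in pkg_resources, so the snippet is not self-contained. A hedged usage sketch of the public API it implements, assuming setuptools/pkg_resources is installed (attribute names can differ slightly across versions), might look like this:

```python
from pkg_resources import parse_requirements

for req in parse_requirements("requests[security]>=2.8.1"):
    print(req.project_name, req.extras, req.specs)
# e.g.: requests ('security',) [('>=', '2.8.1')]
```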
2,466,455 | def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
for name in dir(manager):
if not name.startswith('_'):
g[name] = getattr(manager, name) | [
"def",
"_initialize",
"(",
"g",
"=",
"globals",
"(",
")",
")",
":",
"manager",
"=",
"ResourceManager",
"(",
")",
"g",
"[",
"'_manager'",
"]",
"=",
"manager",
"for",
"name",
"in",
"dir",
"(",
"manager",
")",
":",
"if",
"not",
"name",
".",
"startswith",
"(",
"'_'",
")",
":",
"g",
"[",
"name",
"]",
"=",
"getattr",
"(",
"manager",
",",
"name",
")"
] | python | Set up global resource manager (deliberately not state-saved) | true |
2,466,456 | def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path, and ensure that
# all distributions added to the working set in the future (e.g. by
# calling ``require()``) will get activated as well.
add_activation_listener(lambda dist: dist.activate())
working_set.entries=[]
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals()) | [
"def",
"_initialize_master_working_set",
"(",
")",
":",
"working_set",
"=",
"WorkingSet",
".",
"_build_master",
"(",
")",
"_declare_state",
"(",
"'object'",
",",
"working_set",
"=",
"working_set",
")",
"require",
"=",
"working_set",
".",
"require",
"iter_entry_points",
"=",
"working_set",
".",
"iter_entry_points",
"add_activation_listener",
"=",
"working_set",
".",
"subscribe",
"run_script",
"=",
"working_set",
".",
"run_script",
"run_main",
"=",
"run_script",
"add_activation_listener",
"(",
"lambda",
"dist",
":",
"dist",
".",
"activate",
"(",
")",
")",
"working_set",
".",
"entries",
"=",
"[",
"]",
"list",
"(",
"map",
"(",
"working_set",
".",
"add_entry",
",",
"sys",
".",
"path",
")",
")",
"globals",
"(",
")",
".",
"update",
"(",
"locals",
"(",
")",
")"
] | python | Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk. | true |
2,466,518 | def _get_unpatched(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
while cls.__module__.startswith('setuptools'):
cls, = cls.__bases__
if not cls.__module__.startswith('distutils'):
raise AssertionError(
"distutils has already been patched by %r" % cls
)
return cls | [
"def",
"_get_unpatched",
"(",
"cls",
")",
":",
"while",
"cls",
".",
"__module__",
".",
"startswith",
"(",
"'setuptools'",
")",
":",
"cls",
",",
"=",
"cls",
".",
"__bases__",
"if",
"not",
"cls",
".",
"__module__",
".",
"startswith",
"(",
"'distutils'",
")",
":",
"raise",
"AssertionError",
"(",
"\"distutils has already been patched by %r\"",
"%",
"cls",
")",
"return",
"cls"
] | python | Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first. | true |
2,466,520 | def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
for k,v in value.items():
if ':' in k:
k,m = k.split(':',1)
if pkg_resources.invalid_marker(m):
raise DistutilsSetupError("Invalid environment marker: "+m)
list(pkg_resources.parse_requirements(v))
except (TypeError,ValueError,AttributeError):
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
) | [
"def",
"check_extras",
"(",
"dist",
",",
"attr",
",",
"value",
")",
":",
"try",
":",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"':'",
"in",
"k",
":",
"k",
",",
"m",
"=",
"k",
".",
"split",
"(",
"':'",
",",
"1",
")",
"if",
"pkg_resources",
".",
"invalid_marker",
"(",
"m",
")",
":",
"raise",
"DistutilsSetupError",
"(",
"\"Invalid environment marker: \"",
"+",
"m",
")",
"list",
"(",
"pkg_resources",
".",
"parse_requirements",
"(",
"v",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
",",
"AttributeError",
")",
":",
"raise",
"DistutilsSetupError",
"(",
"\"'extras_require' must be a dictionary whose values are \"",
"\"strings or lists of strings containing valid project/version \"",
"\"requirement specifiers.\"",
")"
] | python | Verify that extras_require mapping is valid | true |
2,466,574 | def attach_enctype_error_multidict(request):
"""Since Flask 0.8 we're monkeypatching the files object in case a
request is detected that does not use multipart form data but the files
object is accessed.
"""
oldcls = request.files.__class__
class newcls(oldcls):
def __getitem__(self, key):
try:
return oldcls.__getitem__(self, key)
except KeyError as e:
if key not in request.form:
raise
raise DebugFilesKeyError(request, key)
newcls.__name__ = oldcls.__name__
newcls.__module__ = oldcls.__module__
request.files.__class__ = newcls | [
"def",
"attach_enctype_error_multidict",
"(",
"request",
")",
":",
"oldcls",
"=",
"request",
".",
"files",
".",
"__class__",
"class",
"newcls",
"(",
"oldcls",
")",
":",
"def",
"__getitem__",
"(",
"self",
",",
"key",
")",
":",
"try",
":",
"return",
"oldcls",
".",
"__getitem__",
"(",
"self",
",",
"key",
")",
"except",
"KeyError",
"as",
"e",
":",
"if",
"key",
"not",
"in",
"request",
".",
"form",
":",
"raise",
"raise",
"DebugFilesKeyError",
"(",
"request",
",",
"key",
")",
"newcls",
".",
"__name__",
"=",
"oldcls",
".",
"__name__",
"newcls",
".",
"__module__",
"=",
"oldcls",
".",
"__module__",
"request",
".",
"files",
".",
"__class__",
"=",
"newcls"
] | python | Since Flask 0.8 we're monkeypatching the files object in case a
request is detected that does not use multipart form data but the files
object is accessed. | true |
2,466,586 | def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install) | [
"def",
"make_abstract_dist",
"(",
"req_to_install",
")",
":",
"if",
"req_to_install",
".",
"editable",
":",
"return",
"IsSDist",
"(",
"req_to_install",
")",
"elif",
"req_to_install",
".",
"link",
"and",
"req_to_install",
".",
"link",
".",
"is_wheel",
":",
"return",
"IsWheel",
"(",
"req_to_install",
")",
"else",
":",
"return",
"IsSDist",
"(",
"req_to_install",
")"
] | python | Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction. | true |
2,466,656 | def html_annotate(doclist, markup=default_markup):
"""
doclist should be ordered from oldest to newest, like::
>>> version1 = 'Hello World'
>>> version2 = 'Goodbye World'
>>> print(html_annotate([(version1, 'version 1'),
... (version2, 'version 2')]))
<span title="version 2">Goodbye</span> <span title="version 1">World</span>
The documents must be *fragments* (str/UTF8 or unicode), not
complete documents
The markup argument is a function to markup the spans of words.
This function is called like markup('Hello', 'version 2'), and
returns HTML. The first argument is text and never includes any
markup. The default uses a span with a title:
>>> print(default_markup('Some Text', 'by Joe'))
<span title="by Joe">Some Text</span>
"""
# The basic strategy we have is to split the documents up into
# logical tokens (which are words with attached markup). We then
# do diffs of each of the versions to track when a token first
# appeared in the document; the annotation attached to the token
# is the version where it first appeared.
tokenlist = [tokenize_annotated(doc, version)
for doc, version in doclist]
cur_tokens = tokenlist[0]
for tokens in tokenlist[1:]:
html_annotate_merge_annotations(cur_tokens, tokens)
cur_tokens = tokens
# After we've tracked all the tokens, we can combine spans of text
# that are adjacent and have the same annotation
cur_tokens = compress_tokens(cur_tokens)
# And finally add markup
result = markup_serialize_tokens(cur_tokens, markup)
return ''.join(result).strip() | [
"def",
"html_annotate",
"(",
"doclist",
",",
"markup",
"=",
"default_markup",
")",
":",
"tokenlist",
"=",
"[",
"tokenize_annotated",
"(",
"doc",
",",
"version",
")",
"for",
"doc",
",",
"version",
"in",
"doclist",
"]",
"cur_tokens",
"=",
"tokenlist",
"[",
"0",
"]",
"for",
"tokens",
"in",
"tokenlist",
"[",
"1",
":",
"]",
":",
"html_annotate_merge_annotations",
"(",
"cur_tokens",
",",
"tokens",
")",
"cur_tokens",
"=",
"tokens",
"cur_tokens",
"=",
"compress_tokens",
"(",
"cur_tokens",
")",
"result",
"=",
"markup_serialize_tokens",
"(",
"cur_tokens",
",",
"markup",
")",
"return",
"''",
".",
"join",
"(",
"result",
")",
".",
"strip",
"(",
")"
] | python | doclist should be ordered from oldest to newest, like::
>>> version1 = 'Hello World'
>>> version2 = 'Goodbye World'
>>> print(html_annotate([(version1, 'version 1'),
... (version2, 'version 2')]))
<span title="version 2">Goodbye</span> <span title="version 1">World</span>
The documents must be *fragments* (str/UTF8 or unicode), not
complete documents
The markup argument is a function to markup the spans of words.
This function is called like markup('Hello', 'version 2'), and
returns HTML. The first argument is text and never includes any
markup. The default uses a span with a title:
>>> print(default_markup('Some Text', 'by Joe'))
<span title="by Joe">Some Text</span> | true |
2,466,657 | def tokenize_annotated(doc, annotation):
"""Tokenize a document and add an annotation attribute to each token
"""
tokens = tokenize(doc, include_hrefs=False)
for tok in tokens:
tok.annotation = annotation
return tokens | [
"def",
"tokenize_annotated",
"(",
"doc",
",",
"annotation",
")",
":",
"tokens",
"=",
"tokenize",
"(",
"doc",
",",
"include_hrefs",
"=",
"False",
")",
"for",
"tok",
"in",
"tokens",
":",
"tok",
".",
"annotation",
"=",
"annotation",
"return",
"tokens"
] | python | Tokenize a document and add an annotation attribute to each token | true |
2,466,658 | def html_annotate_merge_annotations(tokens_old, tokens_new):
"""Merge the annotations from tokens_old into tokens_new, when the
tokens in the new document already existed in the old document.
"""
s = InsensitiveSequenceMatcher(a=tokens_old, b=tokens_new)
commands = s.get_opcodes()
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
eq_old = tokens_old[i1:i2]
eq_new = tokens_new[j1:j2]
copy_annotations(eq_old, eq_new) | [
"def",
"html_annotate_merge_annotations",
"(",
"tokens_old",
",",
"tokens_new",
")",
":",
"s",
"=",
"InsensitiveSequenceMatcher",
"(",
"a",
"=",
"tokens_old",
",",
"b",
"=",
"tokens_new",
")",
"commands",
"=",
"s",
".",
"get_opcodes",
"(",
")",
"for",
"command",
",",
"i1",
",",
"i2",
",",
"j1",
",",
"j2",
"in",
"commands",
":",
"if",
"command",
"==",
"'equal'",
":",
"eq_old",
"=",
"tokens_old",
"[",
"i1",
":",
"i2",
"]",
"eq_new",
"=",
"tokens_new",
"[",
"j1",
":",
"j2",
"]",
"copy_annotations",
"(",
"eq_old",
",",
"eq_new",
")"
] | python | Merge the annotations from tokens_old into tokens_new, when the
tokens in the new document already existed in the old document. | true |
2,466,659 | def copy_annotations(src, dest):
"""
Copy annotations from the tokens listed in src to the tokens in dest
"""
assert len(src) == len(dest)
for src_tok, dest_tok in zip(src, dest):
dest_tok.annotation = src_tok.annotation | [
"def",
"copy_annotations",
"(",
"src",
",",
"dest",
")",
":",
"assert",
"len",
"(",
"src",
")",
"==",
"len",
"(",
"dest",
")",
"for",
"src_tok",
",",
"dest_tok",
"in",
"zip",
"(",
"src",
",",
"dest",
")",
":",
"dest_tok",
".",
"annotation",
"=",
"src_tok",
".",
"annotation"
] | python | Copy annotations from the tokens listed in src to the tokens in dest | true |
2,466,660 | def compress_tokens(tokens):
"""
Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation
"""
result = [tokens[0]]
for tok in tokens[1:]:
if (not result[-1].post_tags and
not tok.pre_tags and
result[-1].annotation == tok.annotation):
compress_merge_back(result, tok)
else:
result.append(tok)
return result | [
"def",
"compress_tokens",
"(",
"tokens",
")",
":",
"result",
"=",
"[",
"tokens",
"[",
"0",
"]",
"]",
"for",
"tok",
"in",
"tokens",
"[",
"1",
":",
"]",
":",
"if",
"(",
"not",
"result",
"[",
"-",
"1",
"]",
".",
"post_tags",
"and",
"not",
"tok",
".",
"pre_tags",
"and",
"result",
"[",
"-",
"1",
"]",
".",
"annotation",
"==",
"tok",
".",
"annotation",
")",
":",
"compress_merge_back",
"(",
"result",
",",
"tok",
")",
"else",
":",
"result",
".",
"append",
"(",
"tok",
")",
"return",
"result"
] | python | Combine adjacent tokens when there is no HTML between the tokens,
and they share an annotation | true |
2,466,661 | def compress_merge_back(tokens, tok):
""" Merge tok into the last element of tokens (modifying the list of
tokens in-place). """
last = tokens[-1]
if type(last) is not token or type(tok) is not token:
tokens.append(tok)
else:
text = _unicode(last)
if last.trailing_whitespace:
text += last.trailing_whitespace
text += tok
merged = token(text,
pre_tags=last.pre_tags,
post_tags=tok.post_tags,
trailing_whitespace=tok.trailing_whitespace)
merged.annotation = last.annotation
tokens[-1] = merged | [
"def",
"compress_merge_back",
"(",
"tokens",
",",
"tok",
")",
":",
"last",
"=",
"tokens",
"[",
"-",
"1",
"]",
"if",
"type",
"(",
"last",
")",
"is",
"not",
"token",
"or",
"type",
"(",
"tok",
")",
"is",
"not",
"token",
":",
"tokens",
".",
"append",
"(",
"tok",
")",
"else",
":",
"text",
"=",
"_unicode",
"(",
"last",
")",
"if",
"last",
".",
"trailing_whitespace",
":",
"text",
"+=",
"last",
".",
"trailing_whitespace",
"text",
"+=",
"tok",
"merged",
"=",
"token",
"(",
"text",
",",
"pre_tags",
"=",
"last",
".",
"pre_tags",
",",
"post_tags",
"=",
"tok",
".",
"post_tags",
",",
"trailing_whitespace",
"=",
"tok",
".",
"trailing_whitespace",
")",
"merged",
".",
"annotation",
"=",
"last",
".",
"annotation",
"tokens",
"[",
"-",
"1",
"]",
"=",
"merged"
] | python | Merge tok into the last element of tokens (modifying the list of
tokens in-place). | true |
2,466,662 | def markup_serialize_tokens(tokens, markup_func):
"""
Serialize the list of tokens into a list of text chunks, calling
markup_func around text to add annotations.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
html = token.html()
html = markup_func(html, token.annotation)
if token.trailing_whitespace:
html += token.trailing_whitespace
yield html
for post in token.post_tags:
yield post | [
"def",
"markup_serialize_tokens",
"(",
"tokens",
",",
"markup_func",
")",
":",
"for",
"token",
"in",
"tokens",
":",
"for",
"pre",
"in",
"token",
".",
"pre_tags",
":",
"yield",
"pre",
"html",
"=",
"token",
".",
"html",
"(",
")",
"html",
"=",
"markup_func",
"(",
"html",
",",
"token",
".",
"annotation",
")",
"if",
"token",
".",
"trailing_whitespace",
":",
"html",
"+=",
"token",
".",
"trailing_whitespace",
"yield",
"html",
"for",
"post",
"in",
"token",
".",
"post_tags",
":",
"yield",
"post"
] | python | Serialize the list of tokens into a list of text chunks, calling
markup_func around text to add annotations. | true |
2,466,663 | def htmldiff(old_html, new_html):
## FIXME: this should take parsed documents too, and use their body
## or other content.
""" Do a diff of the old and new document. The documents are HTML
*fragments* (str/UTF8 or unicode), they are not complete documents
(i.e., no <html> tag).
Returns HTML with <ins> and <del> tags added around the
appropriate text.
Markup is generally ignored, with the markup from new_html
preserved, and possibly some markup from old_html (though it is
considered acceptable to lose some of the old markup). Only the
words in the HTML are diffed. The exception is <img> tags, which
are treated like words, and the href attribute of <a> tags, which
are noted inside the tag itself when there are changes.
"""
old_html_tokens = tokenize(old_html)
new_html_tokens = tokenize(new_html)
result = htmldiff_tokens(old_html_tokens, new_html_tokens)
result = ''.join(result).strip()
return fixup_ins_del_tags(result) | [
"def",
"htmldiff",
"(",
"old_html",
",",
"new_html",
")",
":",
"old_html_tokens",
"=",
"tokenize",
"(",
"old_html",
")",
"new_html_tokens",
"=",
"tokenize",
"(",
"new_html",
")",
"result",
"=",
"htmldiff_tokens",
"(",
"old_html_tokens",
",",
"new_html_tokens",
")",
"result",
"=",
"''",
".",
"join",
"(",
"result",
")",
".",
"strip",
"(",
")",
"return",
"fixup_ins_del_tags",
"(",
"result",
")"
] | python | Do a diff of the old and new document. The documents are HTML
*fragments* (str/UTF8 or unicode), they are not complete documents
(i.e., no <html> tag).
Returns HTML with <ins> and <del> tags added around the
appropriate text.
Markup is generally ignored, with the markup from new_html
preserved, and possibly some markup from old_html (though it is
considered acceptable to lose some of the old markup). Only the
words in the HTML are diffed. The exception is <img> tags, which
are treated like words, and the href attribute of <a> tags, which
are noted inside the tag itself when there are changes. | true |
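`htmldiff` is exposed by lxml's public `lxml.html.diff` module, so a usage sketch (requiring lxml to be installed) is straightforward; the exact output markup can vary between lxml versions.

```python
from lxml.html.diff import htmldiff

old = '<p>Hello World</p>'
new = '<p>Goodbye World</p>'
print(htmldiff(old, new))
# Roughly: <p><ins>Goodbye</ins> <del>Hello</del> World</p>
```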
2,466,664 | def htmldiff_tokens(html1_tokens, html2_tokens):
""" Does a diff on the tokens themselves, returning a list of text
chunks (not tokens).
"""
# There are several passes as we do the differences. The tokens
# isolate the portion of the content we care to diff; difflib does
# all the actual hard work at that point.
#
# Then we must create a valid document from pieces of both the old
# document and the new document. We generally prefer to take
# markup from the new document, and only do a best effort attempt
# to keep markup from the old document; anything that we can't
# resolve we throw away. Also we try to put the deletes as close
# to the location where we think they would have been -- because
# we are only keeping the markup from the new document, it can be
# fuzzy where in the new document the old text would have gone.
# Again we just do a best effort attempt.
s = InsensitiveSequenceMatcher(a=html1_tokens, b=html2_tokens)
commands = s.get_opcodes()
result = []
for command, i1, i2, j1, j2 in commands:
if command == 'equal':
result.extend(expand_tokens(html2_tokens[j1:j2], equal=True))
continue
if command == 'insert' or command == 'replace':
ins_tokens = expand_tokens(html2_tokens[j1:j2])
merge_insert(ins_tokens, result)
if command == 'delete' or command == 'replace':
del_tokens = expand_tokens(html1_tokens[i1:i2])
merge_delete(del_tokens, result)
# If deletes were inserted directly as <del> then we'd have an
# invalid document at this point. Instead we put in special
# markers, and when the complete diffed document has been created
# we try to move the deletes around and resolve any problems.
result = cleanup_delete(result)
return result | [
"def",
"htmldiff_tokens",
"(",
"html1_tokens",
",",
"html2_tokens",
")",
":",
"s",
"=",
"InsensitiveSequenceMatcher",
"(",
"a",
"=",
"html1_tokens",
",",
"b",
"=",
"html2_tokens",
")",
"commands",
"=",
"s",
".",
"get_opcodes",
"(",
")",
"result",
"=",
"[",
"]",
"for",
"command",
",",
"i1",
",",
"i2",
",",
"j1",
",",
"j2",
"in",
"commands",
":",
"if",
"command",
"==",
"'equal'",
":",
"result",
".",
"extend",
"(",
"expand_tokens",
"(",
"html2_tokens",
"[",
"j1",
":",
"j2",
"]",
",",
"equal",
"=",
"True",
")",
")",
"continue",
"if",
"command",
"==",
"'insert'",
"or",
"command",
"==",
"'replace'",
":",
"ins_tokens",
"=",
"expand_tokens",
"(",
"html2_tokens",
"[",
"j1",
":",
"j2",
"]",
")",
"merge_insert",
"(",
"ins_tokens",
",",
"result",
")",
"if",
"command",
"==",
"'delete'",
"or",
"command",
"==",
"'replace'",
":",
"del_tokens",
"=",
"expand_tokens",
"(",
"html1_tokens",
"[",
"i1",
":",
"i2",
"]",
")",
"merge_delete",
"(",
"del_tokens",
",",
"result",
")",
"result",
"=",
"cleanup_delete",
"(",
"result",
")",
"return",
"result"
] | python | Does a diff on the tokens themselves, returning a list of text
chunks (not tokens). | true |
2,466,665 | def expand_tokens(tokens, equal=False):
"""Given a list of tokens, return a generator of the chunks of
text for the data in the tokens.
"""
for token in tokens:
for pre in token.pre_tags:
yield pre
if not equal or not token.hide_when_equal:
if token.trailing_whitespace:
yield token.html() + token.trailing_whitespace
else:
yield token.html()
for post in token.post_tags:
yield post | [
"def",
"expand_tokens",
"(",
"tokens",
",",
"equal",
"=",
"False",
")",
":",
"for",
"token",
"in",
"tokens",
":",
"for",
"pre",
"in",
"token",
".",
"pre_tags",
":",
"yield",
"pre",
"if",
"not",
"equal",
"or",
"not",
"token",
".",
"hide_when_equal",
":",
"if",
"token",
".",
"trailing_whitespace",
":",
"yield",
"token",
".",
"html",
"(",
")",
"+",
"token",
".",
"trailing_whitespace",
"else",
":",
"yield",
"token",
".",
"html",
"(",
")",
"for",
"post",
"in",
"token",
".",
"post_tags",
":",
"yield",
"post"
] | python | Given a list of tokens, return a generator of the chunks of
text for the data in the tokens. | true |
2,466,666 | def merge_insert(ins_chunks, doc):
""" doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. """
# Though we don't throw away unbalanced_start or unbalanced_end
# (we assume there is accompanying markup later or earlier in the
# document), we only put <ins> around the balanced portion.
unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
doc.extend(unbalanced_start)
if doc and not doc[-1].endswith(' '):
# Fix up the case where the word before the insert didn't end with
# a space
doc[-1] += ' '
doc.append('<ins>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </ins>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</ins> ')
doc.extend(unbalanced_end) | [
"def",
"merge_insert",
"(",
"ins_chunks",
",",
"doc",
")",
":",
"unbalanced_start",
",",
"balanced",
",",
"unbalanced_end",
"=",
"split_unbalanced",
"(",
"ins_chunks",
")",
"doc",
".",
"extend",
"(",
"unbalanced_start",
")",
"if",
"doc",
"and",
"not",
"doc",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"' '",
")",
":",
"doc",
"[",
"-",
"1",
"]",
"+=",
"' '",
"doc",
".",
"append",
"(",
"'<ins>'",
")",
"if",
"balanced",
"and",
"balanced",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"' '",
")",
":",
"balanced",
"[",
"-",
"1",
"]",
"=",
"balanced",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"doc",
".",
"extend",
"(",
"balanced",
")",
"doc",
".",
"append",
"(",
"'</ins> '",
")",
"doc",
".",
"extend",
"(",
"unbalanced_end",
")"
] | python | doc is the already-handled document (as a list of text chunks);
here we add <ins>ins_chunks</ins> to the end of that. | true |
2,466,667 | def merge_delete(del_chunks, doc):
""" Adds the text chunks in del_chunks to the document doc (another
list of text chunks) with marker to show it is a delete.
cleanup_delete later resolves these markers into <del> tags."""
doc.append(DEL_START)
doc.extend(del_chunks)
doc.append(DEL_END) | [
"def",
"merge_delete",
"(",
"del_chunks",
",",
"doc",
")",
":",
"doc",
".",
"append",
"(",
"DEL_START",
")",
"doc",
".",
"extend",
"(",
"del_chunks",
")",
"doc",
".",
"append",
"(",
"DEL_END",
")"
] | python | Adds the text chunks in del_chunks to the document doc (another
list of text chunks) with marker to show it is a delete.
cleanup_delete later resolves these markers into <del> tags. | true |
2,466,668 | def cleanup_delete(chunks):
""" Cleans up any DEL_START/DEL_END markers in the document, replacing
them with <del></del>. To do this while keeping the document
valid, it may need to drop some tags (either start or end tags).
It may also move the del into adjacent tags to try to move it to a
similar location where it was originally located (e.g., moving a
delete into preceding <div> tag, if the del looks like (DEL_START,
'Text</div>', DEL_END)"""
while 1:
# Find a pending DEL_START/DEL_END, splitting the document
# into stuff-preceding-DEL_START, stuff-inside, and
# stuff-following-DEL_END
try:
pre_delete, delete, post_delete = split_delete(chunks)
except NoDeletes:
# Nothing found, we've cleaned up the entire doc
break
# The stuff-inside-DEL_START/END may not be well balanced
# markup. First we figure out what unbalanced portions there are:
unbalanced_start, balanced, unbalanced_end = split_unbalanced(delete)
# Then we move the span forward and/or backward based on these
# unbalanced portions:
locate_unbalanced_start(unbalanced_start, pre_delete, post_delete)
locate_unbalanced_end(unbalanced_end, pre_delete, post_delete)
doc = pre_delete
if doc and not doc[-1].endswith(' '):
# Fix up case where the word before us didn't have a trailing space
doc[-1] += ' '
doc.append('<del>')
if balanced and balanced[-1].endswith(' '):
# We move space outside of </del>
balanced[-1] = balanced[-1][:-1]
doc.extend(balanced)
doc.append('</del> ')
doc.extend(post_delete)
chunks = doc
return chunks | [
"def",
"cleanup_delete",
"(",
"chunks",
")",
":",
"while",
"1",
":",
"try",
":",
"pre_delete",
",",
"delete",
",",
"post_delete",
"=",
"split_delete",
"(",
"chunks",
")",
"except",
"NoDeletes",
":",
"break",
"unbalanced_start",
",",
"balanced",
",",
"unbalanced_end",
"=",
"split_unbalanced",
"(",
"delete",
")",
"locate_unbalanced_start",
"(",
"unbalanced_start",
",",
"pre_delete",
",",
"post_delete",
")",
"locate_unbalanced_end",
"(",
"unbalanced_end",
",",
"pre_delete",
",",
"post_delete",
")",
"doc",
"=",
"pre_delete",
"if",
"doc",
"and",
"not",
"doc",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"' '",
")",
":",
"doc",
"[",
"-",
"1",
"]",
"+=",
"' '",
"doc",
".",
"append",
"(",
"'<del>'",
")",
"if",
"balanced",
"and",
"balanced",
"[",
"-",
"1",
"]",
".",
"endswith",
"(",
"' '",
")",
":",
"balanced",
"[",
"-",
"1",
"]",
"=",
"balanced",
"[",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"doc",
".",
"extend",
"(",
"balanced",
")",
"doc",
".",
"append",
"(",
"'</del> '",
")",
"doc",
".",
"extend",
"(",
"post_delete",
")",
"chunks",
"=",
"doc",
"return",
"chunks"
] | python | Cleans up any DEL_START/DEL_END markers in the document, replacing
them with <del></del>. To do this while keeping the document
valid, it may need to drop some tags (either start or end tags).
It may also move the del into adjacent tags to try to move it to a
similar location where it was originally located (e.g., moving a
delete into preceding <div> tag, if the del looks like (DEL_START,
'Text</div>', DEL_END) | true |
2,466,671 | def locate_unbalanced_start(unbalanced_start, pre_delete, post_delete):
""" pre_delete and post_delete implicitly point to a place in the
document (where the two were split). This moves that point (by
popping items from one and pushing them onto the other). It moves
the point to try to find a place where unbalanced_start applies.
As an example::
>>> unbalanced_start = ['<div>']
>>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
>>> pre, post = doc[:3], doc[3:]
>>> pre, post
(['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
>>> locate_unbalanced_start(unbalanced_start, pre, post)
>>> pre, post
(['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])
As you can see, we moved the point so that the dangling <div> that
we found will be effectively replaced by the div in the original
document. If this doesn't work out, we just throw away
unbalanced_start without doing anything.
"""
while 1:
if not unbalanced_start:
# We have totally succeeded in finding the position

break
finding = unbalanced_start[0]
finding_name = finding.split()[0].strip('<>')
if not post_delete:
break
next = post_delete[0]
if next is DEL_START or not next.startswith('<'):
# Reached a word, we can't move the delete text forward
break
if next[1] == '/':
# Reached a closing tag, can we go further? Maybe not...
break
name = next.split()[0].strip('<>')
if name == 'ins':
# Can't move into an insert
break
assert name != 'del', (
"Unexpected delete tag: %r" % next)
if name == finding_name:
unbalanced_start.pop(0)
pre_delete.append(post_delete.pop(0))
else:
# Found a tag that doesn't match
break | [
"def",
"locate_unbalanced_start",
"(",
"unbalanced_start",
",",
"pre_delete",
",",
"post_delete",
")",
":",
"while",
"1",
":",
"if",
"not",
"unbalanced_start",
":",
"break",
"finding",
"=",
"unbalanced_start",
"[",
"0",
"]",
"finding_name",
"=",
"finding",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'<>'",
")",
"if",
"not",
"post_delete",
":",
"break",
"next",
"=",
"post_delete",
"[",
"0",
"]",
"if",
"next",
"is",
"DEL_START",
"or",
"not",
"next",
".",
"startswith",
"(",
"'<'",
")",
":",
"break",
"if",
"next",
"[",
"1",
"]",
"==",
"'/'",
":",
"break",
"name",
"=",
"next",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'<>'",
")",
"if",
"name",
"==",
"'ins'",
":",
"break",
"assert",
"name",
"!=",
"'del'",
",",
"(",
"\"Unexpected delete tag: %r\"",
"%",
"next",
")",
"if",
"name",
"==",
"finding_name",
":",
"unbalanced_start",
".",
"pop",
"(",
"0",
")",
"pre_delete",
".",
"append",
"(",
"post_delete",
".",
"pop",
"(",
"0",
")",
")",
"else",
":",
"break"
] | python | pre_delete and post_delete implicitly point to a place in the
document (where the two were split). This moves that point (by
popping items from one and pushing them onto the other). It moves
the point to try to find a place where unbalanced_start applies.
As an example::
>>> unbalanced_start = ['<div>']
>>> doc = ['<p>', 'Text', '</p>', '<div>', 'More Text', '</div>']
>>> pre, post = doc[:3], doc[3:]
>>> pre, post
(['<p>', 'Text', '</p>'], ['<div>', 'More Text', '</div>'])
>>> locate_unbalanced_start(unbalanced_start, pre, post)
>>> pre, post
(['<p>', 'Text', '</p>', '<div>'], ['More Text', '</div>'])
As you can see, we moved the point so that the dangling <div> that
we found will be effectively replaced by the div in the original
document. If this doesn't work out, we just throw away
unbalanced_start without doing anything. | true |
2,466,672 | def locate_unbalanced_end(unbalanced_end, pre_delete, post_delete):
""" like locate_unbalanced_start, except handling end tags and
possibly moving the point earlier in the document. """
while 1:
if not unbalanced_end:
# Success
break
finding = unbalanced_end[-1]
finding_name = finding.split()[0].strip('<>/')
if not pre_delete:
break
next = pre_delete[-1]
if next is DEL_END or not next.startswith('</'):
# A word or a start tag
break
name = next.split()[0].strip('<>/')
if name == 'ins' or name == 'del':
# Can't move into an insert or delete
break
if name == finding_name:
unbalanced_end.pop()
post_delete.insert(0, pre_delete.pop())
else:
# Found a tag that doesn't match
break | [
"def",
"locate_unbalanced_end",
"(",
"unbalanced_end",
",",
"pre_delete",
",",
"post_delete",
")",
":",
"while",
"1",
":",
"if",
"not",
"unbalanced_end",
":",
"break",
"finding",
"=",
"unbalanced_end",
"[",
"-",
"1",
"]",
"finding_name",
"=",
"finding",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'<>/'",
")",
"if",
"not",
"pre_delete",
":",
"break",
"next",
"=",
"pre_delete",
"[",
"-",
"1",
"]",
"if",
"next",
"is",
"DEL_END",
"or",
"not",
"next",
".",
"startswith",
"(",
"'</'",
")",
":",
"break",
"name",
"=",
"next",
".",
"split",
"(",
")",
"[",
"0",
"]",
".",
"strip",
"(",
"'<>/'",
")",
"if",
"name",
"==",
"'ins'",
"or",
"name",
"==",
"'del'",
":",
"break",
"if",
"name",
"==",
"finding_name",
":",
"unbalanced_end",
".",
"pop",
"(",
")",
"post_delete",
".",
"insert",
"(",
"0",
",",
"pre_delete",
".",
"pop",
"(",
")",
")",
"else",
":",
"break"
] | python | like locate_unbalanced_start, except handling end tags and
possibly moving the point earlier in the document. | true |
2,466,675 | def cleanup_html(html):
""" This 'cleans' the HTML, meaning that any page structure is removed
(only the contents of <body> are used, if there is any <body).
Also <ins> and <del> tags are removed. """
match = _body_re.search(html)
if match:
html = html[match.end():]
match = _end_body_re.search(html)
if match:
html = html[:match.start()]
html = _ins_del_re.sub('', html)
return html | [
"def",
"cleanup_html",
"(",
"html",
")",
":",
"match",
"=",
"_body_re",
".",
"search",
"(",
"html",
")",
"if",
"match",
":",
"html",
"=",
"html",
"[",
"match",
".",
"end",
"(",
")",
":",
"]",
"match",
"=",
"_end_body_re",
".",
"search",
"(",
"html",
")",
"if",
"match",
":",
"html",
"=",
"html",
"[",
":",
"match",
".",
"start",
"(",
")",
"]",
"html",
"=",
"_ins_del_re",
".",
"sub",
"(",
"''",
",",
"html",
")",
"return",
"html"
] | python | This 'cleans' the HTML, meaning that any page structure is removed
(only the contents of <body> are used, if there is any <body).
Also <ins> and <del> tags are removed. | true |
2,466,676 | def split_trailing_whitespace(word):
"""
This function takes a word, such as 'test\n\n' and returns ('test','\n\n')
"""
stripped_length = len(word.rstrip())
return word[0:stripped_length], word[stripped_length:] | [
"def",
"split_trailing_whitespace",
"(",
"word",
")",
":",
"stripped_length",
"=",
"len",
"(",
"word",
".",
"rstrip",
"(",
")",
")",
"return",
"word",
"[",
"0",
":",
"stripped_length",
"]",
",",
"word",
"[",
"stripped_length",
":",
"]"
] | python | This function takes a word, such as 'test\n\n' and returns ('test','\n\n') | true |
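The helper in the row above is pure string slicing, so it can be exercised directly:

```python
def split_trailing_whitespace(word):
    # Split a word into (stripped word, trailing whitespace).
    stripped_length = len(word.rstrip())
    return word[0:stripped_length], word[stripped_length:]

print(split_trailing_whitespace('test\n\n'))   # ('test', '\n\n')
print(split_trailing_whitespace('word '))      # ('word', ' ')
```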
2,466,678 | def flatten_el(el, include_hrefs, skip_tag=False):
""" Takes an lxml element el, and generates all the text chunks for
that tag. Each start tag is a chunk, each word is a chunk, and each
end tag is a chunk.
If skip_tag is true, then the outermost container tag is
not returned (just its contents)."""
if not skip_tag:
if el.tag == 'img':
yield ('img', el.get('src'), start_tag(el))
else:
yield start_tag(el)
if el.tag in empty_tags and not el.text and not len(el) and not el.tail:
return
start_words = split_words(el.text)
for word in start_words:
yield html_escape(word)
for child in el:
for item in flatten_el(child, include_hrefs=include_hrefs):
yield item
if el.tag == 'a' and el.get('href') and include_hrefs:
yield ('href', el.get('href'))
if not skip_tag:
yield end_tag(el)
end_words = split_words(el.tail)
for word in end_words:
yield html_escape(word) | [
"def",
"flatten_el",
"(",
"el",
",",
"include_hrefs",
",",
"skip_tag",
"=",
"False",
")",
":",
"if",
"not",
"skip_tag",
":",
"if",
"el",
".",
"tag",
"==",
"'img'",
":",
"yield",
"(",
"'img'",
",",
"el",
".",
"get",
"(",
"'src'",
")",
",",
"start_tag",
"(",
"el",
")",
")",
"else",
":",
"yield",
"start_tag",
"(",
"el",
")",
"if",
"el",
".",
"tag",
"in",
"empty_tags",
"and",
"not",
"el",
".",
"text",
"and",
"not",
"len",
"(",
"el",
")",
"and",
"not",
"el",
".",
"tail",
":",
"return",
"start_words",
"=",
"split_words",
"(",
"el",
".",
"text",
")",
"for",
"word",
"in",
"start_words",
":",
"yield",
"html_escape",
"(",
"word",
")",
"for",
"child",
"in",
"el",
":",
"for",
"item",
"in",
"flatten_el",
"(",
"child",
",",
"include_hrefs",
"=",
"include_hrefs",
")",
":",
"yield",
"item",
"if",
"el",
".",
"tag",
"==",
"'a'",
"and",
"el",
".",
"get",
"(",
"'href'",
")",
"and",
"include_hrefs",
":",
"yield",
"(",
"'href'",
",",
"el",
".",
"get",
"(",
"'href'",
")",
")",
"if",
"not",
"skip_tag",
":",
"yield",
"end_tag",
"(",
"el",
")",
"end_words",
"=",
"split_words",
"(",
"el",
".",
"tail",
")",
"for",
"word",
"in",
"end_words",
":",
"yield",
"html_escape",
"(",
"word",
")"
] | python | Takes an lxml element el, and generates all the text chunks for
that tag. Each start tag is a chunk, each word is a chunk, and each
end tag is a chunk.
If skip_tag is true, then the outermost container tag is
not returned (just its contents). | true |
2,466,679 | def split_words(text):
""" Splits some text into words. Includes trailing whitespace
on each word when appropriate. """
if not text or not text.strip():
return []
words = split_words_re.findall(text)
return words | [
"def",
"split_words",
"(",
"text",
")",
":",
"if",
"not",
"text",
"or",
"not",
"text",
".",
"strip",
"(",
")",
":",
"return",
"[",
"]",
"words",
"=",
"split_words_re",
".",
"findall",
"(",
"text",
")",
"return",
"words"
] | python | Splits some text into words. Includes trailing whitespace
on each word when appropriate. | true |
2,466,682 | def fixup_ins_del_tags(html):
""" Given an html string, move any <ins> or <del> tags inside of any
block-level elements, e.g. transform <ins><p>word</p></ins> to
<p><ins>word</ins></p> """
doc = parse_html(html, cleanup=False)
_fixup_ins_del_tags(doc)
html = serialize_html_fragment(doc, skip_outer=True)
return html | [
"def",
"fixup_ins_del_tags",
"(",
"html",
")",
":",
"doc",
"=",
"parse_html",
"(",
"html",
",",
"cleanup",
"=",
"False",
")",
"_fixup_ins_del_tags",
"(",
"doc",
")",
"html",
"=",
"serialize_html_fragment",
"(",
"doc",
",",
"skip_outer",
"=",
"True",
")",
"return",
"html"
] | python | Given an html string, move any <ins> or <del> tags inside of any
block-level elements, e.g. transform <ins><p>word</p></ins> to
<p><ins>word</ins></p> | true |
2,466,683 | def serialize_html_fragment(el, skip_outer=False):
""" Serialize a single lxml element as HTML. The serialized form
includes the elements tail.
If skip_outer is true, then don't serialize the outermost tag
"""
assert not isinstance(el, basestring), (
"You should pass in an element, not a string like %r" % el)
html = etree.tostring(el, method="html", encoding=_unicode)
if skip_outer:
# Get rid of the extra starting tag:
html = html[html.find('>')+1:]
# Get rid of the extra end tag:
html = html[:html.rfind('<')]
return html.strip()
else:
return html | [
"def",
"serialize_html_fragment",
"(",
"el",
",",
"skip_outer",
"=",
"False",
")",
":",
"assert",
"not",
"isinstance",
"(",
"el",
",",
"basestring",
")",
",",
"(",
"\"You should pass in an element, not a string like %r\"",
"%",
"el",
")",
"html",
"=",
"etree",
".",
"tostring",
"(",
"el",
",",
"method",
"=",
"\"html\"",
",",
"encoding",
"=",
"_unicode",
")",
"if",
"skip_outer",
":",
"html",
"=",
"html",
"[",
"html",
".",
"find",
"(",
"'>'",
")",
"+",
"1",
":",
"]",
"html",
"=",
"html",
"[",
":",
"html",
".",
"rfind",
"(",
"'<'",
")",
"]",
"return",
"html",
".",
"strip",
"(",
")",
"else",
":",
"return",
"html"
] | python | Serialize a single lxml element as HTML. The serialized form
includes the elements tail.
If skip_outer is true, then don't serialize the outermost tag | true |
2,466,684 | def _fixup_ins_del_tags(doc):
"""fixup_ins_del_tags that works on an lxml document in-place
"""
for tag in ['ins', 'del']:
for el in doc.xpath('descendant-or-self::%s' % tag):
if not _contains_block_level_tag(el):
continue
_move_el_inside_block(el, tag=tag)
el.drop_tag() | [
"def",
"_fixup_ins_del_tags",
"(",
"doc",
")",
":",
"for",
"tag",
"in",
"[",
"'ins'",
",",
"'del'",
"]",
":",
"for",
"el",
"in",
"doc",
".",
"xpath",
"(",
"'descendant-or-self::%s'",
"%",
"tag",
")",
":",
"if",
"not",
"_contains_block_level_tag",
"(",
"el",
")",
":",
"continue",
"_move_el_inside_block",
"(",
"el",
",",
"tag",
"=",
"tag",
")",
"el",
".",
"drop_tag",
"(",
")"
] | python | fixup_ins_del_tags that works on an lxml document in-place | true |
2,466,685 | def _contains_block_level_tag(el):
"""True if the element contains any block-level elements, like <p>, <td>, etc.
"""
if el.tag in block_level_tags or el.tag in block_level_container_tags:
return True
for child in el:
if _contains_block_level_tag(child):
return True
return False | [
"def",
"_contains_block_level_tag",
"(",
"el",
")",
":",
"if",
"el",
".",
"tag",
"in",
"block_level_tags",
"or",
"el",
".",
"tag",
"in",
"block_level_container_tags",
":",
"return",
"True",
"for",
"child",
"in",
"el",
":",
"if",
"_contains_block_level_tag",
"(",
"child",
")",
":",
"return",
"True",
"return",
"False"
] | python | True if the element contains any block-level elements, like <p>, <td>, etc. | true |
2,466,686 | def _move_el_inside_block(el, tag):
""" helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
and moves them inside any block-level tags. """
for child in el:
if _contains_block_level_tag(child):
break
else:
import sys
# No block-level tags in any child
children_tag = etree.Element(tag)
children_tag.text = el.text
el.text = None
children_tag.extend(list(el))
el[:] = [children_tag]
return
for child in list(el):
if _contains_block_level_tag(child):
_move_el_inside_block(child, tag)
if child.tail:
tail_tag = etree.Element(tag)
tail_tag.text = child.tail
child.tail = None
el.insert(el.index(child)+1, tail_tag)
else:
child_tag = etree.Element(tag)
el.replace(child, child_tag)
child_tag.append(child)
if el.text:
text_tag = etree.Element(tag)
text_tag.text = el.text
el.text = None
el.insert(0, text_tag) | [
"def",
"_move_el_inside_block",
"(",
"el",
",",
"tag",
")",
":",
"for",
"child",
"in",
"el",
":",
"if",
"_contains_block_level_tag",
"(",
"child",
")",
":",
"break",
"else",
":",
"import",
"sys",
"children_tag",
"=",
"etree",
".",
"Element",
"(",
"tag",
")",
"children_tag",
".",
"text",
"=",
"el",
".",
"text",
"el",
".",
"text",
"=",
"None",
"children_tag",
".",
"extend",
"(",
"list",
"(",
"el",
")",
")",
"el",
"[",
":",
"]",
"=",
"[",
"children_tag",
"]",
"return",
"for",
"child",
"in",
"list",
"(",
"el",
")",
":",
"if",
"_contains_block_level_tag",
"(",
"child",
")",
":",
"_move_el_inside_block",
"(",
"child",
",",
"tag",
")",
"if",
"child",
".",
"tail",
":",
"tail_tag",
"=",
"etree",
".",
"Element",
"(",
"tag",
")",
"tail_tag",
".",
"text",
"=",
"child",
".",
"tail",
"child",
".",
"tail",
"=",
"None",
"el",
".",
"insert",
"(",
"el",
".",
"index",
"(",
"child",
")",
"+",
"1",
",",
"tail_tag",
")",
"else",
":",
"child_tag",
"=",
"etree",
".",
"Element",
"(",
"tag",
")",
"el",
".",
"replace",
"(",
"child",
",",
"child_tag",
")",
"child_tag",
".",
"append",
"(",
"child",
")",
"if",
"el",
".",
"text",
":",
"text_tag",
"=",
"etree",
".",
"Element",
"(",
"tag",
")",
"text_tag",
".",
"text",
"=",
"el",
".",
"text",
"el",
".",
"text",
"=",
"None",
"el",
".",
"insert",
"(",
"0",
",",
"text_tag",
")"
] | python | helper for _fixup_ins_del_tags; actually takes the <ins> etc tags
and moves them inside any block-level tags. | true |
2,466,687 | def _merge_element_contents(el):
"""
Removes an element, but merges its contents into its place, e.g.,
given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
<p>Hi there!</p>
"""
parent = el.getparent()
text = el.text or ''
if el.tail:
if not len(el):
text += el.tail
else:
if el[-1].tail:
el[-1].tail += el.tail
else:
el[-1].tail = el.tail
index = parent.index(el)
if text:
if index == 0:
previous = None
else:
previous = parent[index-1]
if previous is None:
if parent.text:
parent.text += text
else:
parent.text = text
else:
if previous.tail:
previous.tail += text
else:
previous.tail = text
parent[index:index+1] = el.getchildren() | [
"def",
"_merge_element_contents",
"(",
"el",
")",
":",
"parent",
"=",
"el",
".",
"getparent",
"(",
")",
"text",
"=",
"el",
".",
"text",
"or",
"''",
"if",
"el",
".",
"tail",
":",
"if",
"not",
"len",
"(",
"el",
")",
":",
"text",
"+=",
"el",
".",
"tail",
"else",
":",
"if",
"el",
"[",
"-",
"1",
"]",
".",
"tail",
":",
"el",
"[",
"-",
"1",
"]",
".",
"tail",
"+=",
"el",
".",
"tail",
"else",
":",
"el",
"[",
"-",
"1",
"]",
".",
"tail",
"=",
"el",
".",
"tail",
"index",
"=",
"parent",
".",
"index",
"(",
"el",
")",
"if",
"text",
":",
"if",
"index",
"==",
"0",
":",
"previous",
"=",
"None",
"else",
":",
"previous",
"=",
"parent",
"[",
"index",
"-",
"1",
"]",
"if",
"previous",
"is",
"None",
":",
"if",
"parent",
".",
"text",
":",
"parent",
".",
"text",
"+=",
"text",
"else",
":",
"parent",
".",
"text",
"=",
"text",
"else",
":",
"if",
"previous",
".",
"tail",
":",
"previous",
".",
"tail",
"+=",
"text",
"else",
":",
"previous",
".",
"tail",
"=",
"text",
"parent",
"[",
"index",
":",
"index",
"+",
"1",
"]",
"=",
"el",
".",
"getchildren",
"(",
")"
] | python | Removes an element, but merges its contents into its place, e.g.,
given <p>Hi <i>there!</i></p>, if you remove the <i> element you get
<p>Hi there!</p> | true |
2,466,694 | def _iter_code(code):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
from array import array
from dis import HAVE_ARGUMENT, EXTENDED_ARG
bytes = array('b',code.co_code)
eof = len(code.co_code)
ptr = 0
extended_arg = 0
while ptr<eof:
op = bytes[ptr]
if op>=HAVE_ARGUMENT:
arg = bytes[ptr+1] + bytes[ptr+2]*256 + extended_arg
ptr += 3
if op==EXTENDED_ARG:
extended_arg = arg * compat.long_type(65536)
continue
else:
arg = None
ptr += 1
yield op,arg | [
"def",
"_iter_code",
"(",
"code",
")",
":",
"from",
"array",
"import",
"array",
"from",
"dis",
"import",
"HAVE_ARGUMENT",
",",
"EXTENDED_ARG",
"bytes",
"=",
"array",
"(",
"'b'",
",",
"code",
".",
"co_code",
")",
"eof",
"=",
"len",
"(",
"code",
".",
"co_code",
")",
"ptr",
"=",
"0",
"extended_arg",
"=",
"0",
"while",
"ptr",
"<",
"eof",
":",
"op",
"=",
"bytes",
"[",
"ptr",
"]",
"if",
"op",
">=",
"HAVE_ARGUMENT",
":",
"arg",
"=",
"bytes",
"[",
"ptr",
"+",
"1",
"]",
"+",
"bytes",
"[",
"ptr",
"+",
"2",
"]",
"*",
"256",
"+",
"extended_arg",
"ptr",
"+=",
"3",
"if",
"op",
"==",
"EXTENDED_ARG",
":",
"extended_arg",
"=",
"arg",
"*",
"compat",
".",
"long_type",
"(",
"65536",
")",
"continue",
"else",
":",
"arg",
"=",
"None",
"ptr",
"+=",
"1",
"yield",
"op",
",",
"arg"
] | python | Yield '(op,arg)' pair for each operation in code object 'code' | true |
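The row above walks Python 2-era bytecode by hand (3-byte instructions plus `EXTENDED_ARG`). On a current interpreter the same information is exposed by the `dis` module; the sketch below is a rough modern equivalent, not the row's own code.

```python
# Rough modern equivalent of the (op, arg) iteration above, using dis.
# Works on current CPython, where instructions are two bytes each.
import dis

def iter_code(code):
    for instr in dis.get_instructions(code):
        yield instr.opcode, instr.arg   # arg is None for argument-less opcodes

def sample(x):
    return x + 1

for op, arg in iter_code(sample.__code__):
    print(dis.opname[op], arg)
```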
2,466,783 | def word_break(el, max_width=40,
avoid_elements=_avoid_word_break_elements,
avoid_classes=_avoid_word_break_classes,
break_character=unichr(0x200b)):
"""
Breaks any long words found in the body of the text (not attributes).
Doesn't effect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
Breaks words by inserting ​, which is a unicode character
for Zero Width Space character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion
"""
# Character suggestion of ​ comes from:
# http://www.cs.tut.fi/~jkorpela/html/nobr.html
if el.tag in _avoid_word_break_elements:
return
class_name = el.get('class')
if class_name:
dont_break = False
class_name = class_name.split()
for avoid in avoid_classes:
if avoid in class_name:
dont_break = True
break
if dont_break:
return
if el.text:
el.text = _break_text(el.text, max_width, break_character)
for child in el:
word_break(child, max_width=max_width,
avoid_elements=avoid_elements,
avoid_classes=avoid_classes,
break_character=break_character)
if child.tail:
child.tail = _break_text(child.tail, max_width, break_character) | [
"def",
"word_break",
"(",
"el",
",",
"max_width",
"=",
"40",
",",
"avoid_elements",
"=",
"_avoid_word_break_elements",
",",
"avoid_classes",
"=",
"_avoid_word_break_classes",
",",
"break_character",
"=",
"unichr",
"(",
"0x200b",
")",
")",
":",
"if",
"el",
".",
"tag",
"in",
"_avoid_word_break_elements",
":",
"return",
"class_name",
"=",
"el",
".",
"get",
"(",
"'class'",
")",
"if",
"class_name",
":",
"dont_break",
"=",
"False",
"class_name",
"=",
"class_name",
".",
"split",
"(",
")",
"for",
"avoid",
"in",
"avoid_classes",
":",
"if",
"avoid",
"in",
"class_name",
":",
"dont_break",
"=",
"True",
"break",
"if",
"dont_break",
":",
"return",
"if",
"el",
".",
"text",
":",
"el",
".",
"text",
"=",
"_break_text",
"(",
"el",
".",
"text",
",",
"max_width",
",",
"break_character",
")",
"for",
"child",
"in",
"el",
":",
"word_break",
"(",
"child",
",",
"max_width",
"=",
"max_width",
",",
"avoid_elements",
"=",
"avoid_elements",
",",
"avoid_classes",
"=",
"avoid_classes",
",",
"break_character",
"=",
"break_character",
")",
"if",
"child",
".",
"tail",
":",
"child",
".",
"tail",
"=",
"_break_text",
"(",
"child",
".",
"tail",
",",
"max_width",
",",
"break_character",
")"
] | python | Breaks any long words found in the body of the text (not attributes).
Doesn't effect any of the tags in avoid_elements, by default
``<textarea>`` and ``<pre>``
Breaks words by inserting ​, which is a unicode character
for Zero Width Space character. This generally takes up no space
in rendering, but does copy as a space, and in monospace contexts
usually takes up space.
See http://www.cs.tut.fi/~jkorpela/html/nobr.html for a discussion | true |
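The snippet above calls a `_break_text` helper that is not included in the row. The sketch below is a hypothetical stand-in showing the basic idea: chop any word longer than `max_width` by inserting the break character.

```python
# Hypothetical stand-in for the _break_text helper used above: words longer
# than max_width get a break character inserted every max_width characters.
def break_text(text, max_width=40, break_character=u'\u200b'):
    def break_word(word):
        pieces = []
        while len(word) > max_width:
            pieces.append(word[:max_width])
            word = word[max_width:]
        pieces.append(word)
        return break_character.join(pieces)
    return ' '.join(break_word(w) for w in text.split(' '))

print(break_text('x' * 10, max_width=4))  # 'xxxx<ZWSP>xxxx<ZWSP>xx'
```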
2,466,944 | def document_fromstring(html, guess_charset=True, parser=None):
"""Parse a whole document into a string."""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
return parser.parse(html, useChardet=guess_charset).getroot() | [
"def",
"document_fromstring",
"(",
"html",
",",
"guess_charset",
"=",
"True",
",",
"parser",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"html",
",",
"_strings",
")",
":",
"raise",
"TypeError",
"(",
"'string required'",
")",
"if",
"parser",
"is",
"None",
":",
"parser",
"=",
"html_parser",
"return",
"parser",
".",
"parse",
"(",
"html",
",",
"useChardet",
"=",
"guess_charset",
")",
".",
"getroot",
"(",
")"
] | python | Parse a whole document into a string. | true |
2,467,070 | def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p) | [
"def",
"_re_flatten",
"(",
"p",
")",
":",
"if",
"'('",
"not",
"in",
"p",
":",
"return",
"p",
"return",
"re",
".",
"sub",
"(",
"r'(\\\\*)(\\(\\?P<[^>]*>|\\((?!\\?))'",
",",
"lambda",
"m",
":",
"m",
".",
"group",
"(",
"0",
")",
"if",
"len",
"(",
"m",
".",
"group",
"(",
"1",
")",
")",
"%",
"2",
"else",
"m",
".",
"group",
"(",
"1",
")",
"+",
"'(?:'",
",",
"p",
")"
] | python | Turn all capturing groups in a regular expression pattern into
non-capturing groups. | true |
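For illustration, here is the same group-flattening substitution as a standalone helper with two example inputs: plain and named capturing groups become non-capturing, while escaped parentheses are left untouched.

```python
# Standalone copy of the flattening substitution for illustration only.
import re

def re_flatten(p):
    if '(' not in p:
        return p
    return re.sub(r'(\\*)(\(\?P<[^>]*>|\((?!\?))',
                  lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)

print(re_flatten(r'/(\d+)/(?P<name>\w+)'))   # /(?:\d+)/(?:\w+)
print(re_flatten(r'\(literal\)'))            # \(literal\)  (unchanged)
```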
2,467,168 | def get_include():
"""
Returns a list of header include paths (for lxml itself, libxml2
and libxslt) needed to compile C code against lxml if it was built
with statically linked libraries.
"""
import os
lxml_path = __path__[0]
include_path = os.path.join(lxml_path, 'includes')
includes = [include_path, lxml_path]
for name in os.listdir(include_path):
path = os.path.join(include_path, name)
if os.path.isdir(path):
includes.append(path)
return includes | [
"def",
"get_include",
"(",
")",
":",
"import",
"os",
"lxml_path",
"=",
"__path__",
"[",
"0",
"]",
"include_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"lxml_path",
",",
"'includes'",
")",
"includes",
"=",
"[",
"include_path",
",",
"lxml_path",
"]",
"for",
"name",
"in",
"os",
".",
"listdir",
"(",
"include_path",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"include_path",
",",
"name",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
":",
"includes",
".",
"append",
"(",
"path",
")",
"return",
"includes"
] | python | Returns a list of header include paths (for lxml itself, libxml2
and libxslt) needed to compile C code against lxml if it was built
with statically linked libraries. | true |
2,467,237 | def setupmethod(f):
"""Wraps a method so that it performs a check in debug mode if the
first request was already handled.
"""
def wrapper_func(self, *args, **kwargs):
if self.debug and self._got_first_request:
raise AssertionError('A setup function was called after the '
'first request was handled. This usually indicates a bug '
'in the application where a module was not imported '
'and decorators or other functionality was called too late.\n'
'To fix this make sure to import all your view modules, '
'database models and everything related at a central place '
'before the application starts serving requests.')
return f(self, *args, **kwargs)
return update_wrapper(wrapper_func, f) | [
"def",
"setupmethod",
"(",
"f",
")",
":",
"def",
"wrapper_func",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"if",
"self",
".",
"debug",
"and",
"self",
".",
"_got_first_request",
":",
"raise",
"AssertionError",
"(",
"'A setup function was called after the '",
"'first request was handled. This usually indicates a bug '",
"'in the application where a module was not imported '",
"'and decorators or other functionality was called too late.\\n'",
"'To fix this make sure to import all your view modules, '",
"'database models and everything related at a central place '",
"'before the application starts serving requests.'",
")",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"**",
"kwargs",
")",
"return",
"update_wrapper",
"(",
"wrapper_func",
",",
"f",
")"
] | python | Wraps a method so that it performs a check in debug mode if the
first request was already handled. | true |
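The row above is a decorator that runs a guard before the wrapped method and preserves its metadata with `update_wrapper`. Below is a generic, self-contained sketch of that pattern with a made-up `locked` guard, not Flask's actual check.

```python
# Generic sketch of the same decorator pattern: run a guard, then call the
# original method, keeping its name and docstring via update_wrapper.
from functools import update_wrapper

def guarded(f):
    def wrapper_func(self, *args, **kwargs):
        if getattr(self, 'locked', False):
            raise AssertionError('setup method called after lock')
        return f(self, *args, **kwargs)
    return update_wrapper(wrapper_func, f)

class App:
    locked = False

    @guarded
    def add_route(self, rule):
        """Register a rule."""
        print('registered', rule)

app = App()
app.add_route('/hello')          # registered /hello
print(App.add_route.__doc__)     # Register a rule.
```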
2,467,545 | def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder) | [
"def",
"_dump_arg_defaults",
"(",
"kwargs",
")",
":",
"if",
"current_app",
":",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"current_app",
".",
"json_encoder",
")",
"if",
"not",
"current_app",
".",
"config",
"[",
"'JSON_AS_ASCII'",
"]",
":",
"kwargs",
".",
"setdefault",
"(",
"'ensure_ascii'",
",",
"False",
")",
"kwargs",
".",
"setdefault",
"(",
"'sort_keys'",
",",
"current_app",
".",
"config",
"[",
"'JSON_SORT_KEYS'",
"]",
")",
"else",
":",
"kwargs",
".",
"setdefault",
"(",
"'sort_keys'",
",",
"True",
")",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"JSONEncoder",
")"
] | python | Inject default arguments for dump functions. | true |
2,467,546 | def _load_arg_defaults(kwargs):
"""Inject default arguments for load functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_decoder)
else:
kwargs.setdefault('cls', JSONDecoder) | [
"def",
"_load_arg_defaults",
"(",
"kwargs",
")",
":",
"if",
"current_app",
":",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"current_app",
".",
"json_decoder",
")",
"else",
":",
"kwargs",
".",
"setdefault",
"(",
"'cls'",
",",
"JSONDecoder",
")"
] | python | Inject default arguments for load functions. | true |
2,467,551 | def htmlsafe_dumps(obj, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
"""
rv = dumps(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
if not _slash_escape:
rv = rv.replace('\\/', '/')
return rv | [
"def",
"htmlsafe_dumps",
"(",
"obj",
",",
"**",
"kwargs",
")",
":",
"rv",
"=",
"dumps",
"(",
"obj",
",",
"**",
"kwargs",
")",
".",
"replace",
"(",
"u'<'",
",",
"u'\\\\u003c'",
")",
".",
"replace",
"(",
"u'>'",
",",
"u'\\\\u003e'",
")",
".",
"replace",
"(",
"u'&'",
",",
"u'\\\\u0026'",
")",
".",
"replace",
"(",
"u\"'\"",
",",
"u'\\\\u0027'",
")",
"if",
"not",
"_slash_escape",
":",
"rv",
"=",
"rv",
".",
"replace",
"(",
"'\\\\/'",
",",
"'/'",
")",
"return",
"rv"
] | python | Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``. | true |
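The escaping shown above can be reproduced with the standard `json` module alone. The sketch below omits the `_slash_escape` detail, which depends on the JSON backend in use.

```python
# Self-contained sketch of HTML-safe JSON escaping using only stdlib json.
import json

def htmlsafe_dumps(obj, **kwargs):
    rv = json.dumps(obj, **kwargs)
    return (rv.replace(u'<', u'\\u003c')
              .replace(u'>', u'\\u003e')
              .replace(u'&', u'\\u0026')
              .replace(u"'", u'\\u0027'))

print(htmlsafe_dumps({'html': "<script>alert('x')</script>"}))
```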
2,467,552 | def htmlsafe_dump(obj, fp, **kwargs):
"""Like :func:`htmlsafe_dumps` but writes into a file object."""
fp.write(unicode(htmlsafe_dumps(obj, **kwargs))) | [
"def",
"htmlsafe_dump",
"(",
"obj",
",",
"fp",
",",
"**",
"kwargs",
")",
":",
"fp",
".",
"write",
"(",
"unicode",
"(",
"htmlsafe_dumps",
"(",
"obj",
",",
"**",
"kwargs",
")",
")",
")"
] | python | Like :func:`htmlsafe_dumps` but writes into a file object. | true |
2,467,752 | def _strip_postfix(req):
"""
Strip req postfix ( -dev, 0.2, etc )
"""
# FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req | [
"def",
"_strip_postfix",
"(",
"req",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"r'^(.*?)(?:-dev|-\\d.*)$'",
",",
"req",
")",
"if",
"match",
":",
"req",
"=",
"match",
".",
"group",
"(",
"1",
")",
"return",
"req"
] | python | Strip req postfix ( -dev, 0.2, etc ) | true |
2,467,754 | def _build_editable_options(req):
"""
This method generates a dictionary of the query string
parameters contained in a given editable URL.
"""
regexp = re.compile(r"[\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)")
matched = regexp.findall(req)
if matched:
ret = dict()
for option in matched:
(name, value) = option
if name in ret:
raise Exception("%s option already defined" % name)
ret[name] = value
return ret
return None | [
"def",
"_build_editable_options",
"(",
"req",
")",
":",
"regexp",
"=",
"re",
".",
"compile",
"(",
"r\"[\\?#&](?P<name>[^&=]+)=(?P<value>[^&=]+)\"",
")",
"matched",
"=",
"regexp",
".",
"findall",
"(",
"req",
")",
"if",
"matched",
":",
"ret",
"=",
"dict",
"(",
")",
"for",
"option",
"in",
"matched",
":",
"(",
"name",
",",
"value",
")",
"=",
"option",
"if",
"name",
"in",
"ret",
":",
"raise",
"Exception",
"(",
"\"%s option already defined\"",
"%",
"name",
")",
"ret",
"[",
"name",
"]",
"=",
"value",
"return",
"ret",
"return",
"None"
] | python | This method generates a dictionary of the query string
parameters contained in a given editable URL. | true |
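The same extraction can be done with `urllib.parse` instead of a hand-rolled regex. The sketch below is an alternative rendering, not pip's implementation; it keeps the duplicate-key check from the row above.

```python
# Sketch of the same option extraction using urllib.parse; raises on
# duplicate keys like the row above.
try:
    from urllib.parse import urlparse, parse_qsl   # Python 3
except ImportError:
    from urlparse import urlparse, parse_qsl       # Python 2

def build_editable_options(req):
    parsed = urlparse(req)
    pairs = parse_qsl(parsed.query) + parse_qsl(parsed.fragment)
    ret = {}
    for name, value in pairs:
        if name in ret:
            raise Exception("%s option already defined" % name)
        ret[name] = value
    return ret or None

url = 'git+https://example.com/repo.git#egg=demo&subdirectory=pkg'
print(build_editable_options(url))   # {'egg': 'demo', 'subdirectory': 'pkg'}
```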
2,467,882 | def user_cache_dir(appname):
r"""
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
"""
if WINDOWS:
# Get the base path
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
# Add our app name and Cache directory to it
path = os.path.join(path, appname, "Cache")
elif sys.platform == "darwin":
# Get the base path
path = os.path.expanduser("~/Library/Caches")
# Add our app name to it
path = os.path.join(path, appname)
else:
# Get the base path
path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
# Add our app name to it
path = os.path.join(path, appname)
return path | [
"def",
"user_cache_dir",
"(",
"appname",
")",
":",
"if",
"WINDOWS",
":",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"_get_win_folder",
"(",
"\"CSIDL_LOCAL_APPDATA\"",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"appname",
",",
"\"Cache\"",
")",
"elif",
"sys",
".",
"platform",
"==",
"\"darwin\"",
":",
"path",
"=",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/Library/Caches\"",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"appname",
")",
"else",
":",
"path",
"=",
"os",
".",
"getenv",
"(",
"\"XDG_CACHE_HOME\"",
",",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.cache\"",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"appname",
")",
"return",
"path"
] | python | r"""
Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. | true |
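A simplified, self-contained version of the same per-platform resolution is sketched below; the Windows branch uses the `%LOCALAPPDATA%` environment variable instead of the CSIDL shell-folder API used in the row.

```python
# Simplified sketch of the cache-dir resolution above; Windows branch is an
# environment-variable approximation of the CSIDL lookup.
import os
import sys

def user_cache_dir(appname):
    if sys.platform.startswith('win'):
        base = os.environ.get('LOCALAPPDATA', os.path.expanduser('~'))
        return os.path.join(base, appname, 'Cache')
    if sys.platform == 'darwin':
        return os.path.join(os.path.expanduser('~/Library/Caches'), appname)
    base = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
    return os.path.join(base, appname)

print(user_cache_dir('pip'))
```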
2,467,883 | def user_data_dir(appname, roaming=False):
"""
Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in
$XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\ ...
...Application Data\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local ...
...Settings\Application Data\<AppName>
Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName>
Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if WINDOWS:
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
elif sys.platform == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Application Support/'),
appname,
)
else:
path = os.path.join(
os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")),
appname,
)
return path | [
"def",
"user_data_dir",
"(",
"appname",
",",
"roaming",
"=",
"False",
")",
":",
"if",
"WINDOWS",
":",
"const",
"=",
"roaming",
"and",
"\"CSIDL_APPDATA\"",
"or",
"\"CSIDL_LOCAL_APPDATA\"",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"_get_win_folder",
"(",
"const",
")",
")",
",",
"appname",
")",
"elif",
"sys",
".",
"platform",
"==",
"\"darwin\"",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"expanduser",
"(",
"'~/Library/Application Support/'",
")",
",",
"appname",
",",
")",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getenv",
"(",
"'XDG_DATA_HOME'",
",",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.local/share\"",
")",
")",
",",
"appname",
",",
")",
"return",
"path"
] | python | Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in
$XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\ ...
...Application Data\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local ...
...Settings\Application Data\<AppName>
Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName>
Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>". | true |
2,467,885 | def user_config_dir(appname, roaming=True):
"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by deafult "~/.config/<AppName>".
"""
if WINDOWS:
path = user_data_dir(appname, roaming=roaming)
elif sys.platform == "darwin":
path = user_data_dir(appname)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
path = os.path.join(path, appname)
return path | [
"def",
"user_config_dir",
"(",
"appname",
",",
"roaming",
"=",
"True",
")",
":",
"if",
"WINDOWS",
":",
"path",
"=",
"user_data_dir",
"(",
"appname",
",",
"roaming",
"=",
"roaming",
")",
"elif",
"sys",
".",
"platform",
"==",
"\"darwin\"",
":",
"path",
"=",
"user_data_dir",
"(",
"appname",
")",
"else",
":",
"path",
"=",
"os",
".",
"getenv",
"(",
"'XDG_CONFIG_HOME'",
",",
"os",
".",
"path",
".",
"expanduser",
"(",
"\"~/.config\"",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"appname",
")",
"return",
"path"
] | python | Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by deafult "~/.config/<AppName>". | true |
2,467,886 | def site_config_dirs(appname):
"""Return a list of potential user-shared config dirs for this application.
"appname" is the name of application.
Typical user config directories are:
Mac OS X: /Library/Application Support/<AppName>/
Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
$XDG_CONFIG_DIRS
Win XP: C:\Documents and Settings\All Users\Application ...
...Data\<AppName>\
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory
on Vista.)
Win 7: Hidden, but writeable on Win 7:
C:\ProgramData\<AppName>\
"""
if WINDOWS:
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
pathlist = [os.path.join(path, appname)]
elif sys.platform == 'darwin':
pathlist = [os.path.join('/Library/Application Support', appname)]
else:
# try looking in $XDG_CONFIG_DIRS
xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
if xdg_config_dirs:
pathlist = [
os.sep.join([os.path.expanduser(x), appname])
for x in xdg_config_dirs.split(os.pathsep)
]
else:
pathlist = []
# always look in /etc directly as well
pathlist.append('/etc')
return pathlist | [
"def",
"site_config_dirs",
"(",
"appname",
")",
":",
"if",
"WINDOWS",
":",
"path",
"=",
"os",
".",
"path",
".",
"normpath",
"(",
"_get_win_folder",
"(",
"\"CSIDL_COMMON_APPDATA\"",
")",
")",
"pathlist",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"appname",
")",
"]",
"elif",
"sys",
".",
"platform",
"==",
"'darwin'",
":",
"pathlist",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"'/Library/Application Support'",
",",
"appname",
")",
"]",
"else",
":",
"xdg_config_dirs",
"=",
"os",
".",
"getenv",
"(",
"'XDG_CONFIG_DIRS'",
",",
"'/etc/xdg'",
")",
"if",
"xdg_config_dirs",
":",
"pathlist",
"=",
"[",
"os",
".",
"sep",
".",
"join",
"(",
"[",
"os",
".",
"path",
".",
"expanduser",
"(",
"x",
")",
",",
"appname",
"]",
")",
"for",
"x",
"in",
"xdg_config_dirs",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"]",
"else",
":",
"pathlist",
"=",
"[",
"]",
"pathlist",
".",
"append",
"(",
"'/etc'",
")",
"return",
"pathlist"
] | python | Return a list of potential user-shared config dirs for this application.
"appname" is the name of application.
Typical user config directories are:
Mac OS X: /Library/Application Support/<AppName>/
Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
$XDG_CONFIG_DIRS
Win XP: C:\Documents and Settings\All Users\Application ...
...Data\<AppName>\
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory
on Vista.)
Win 7: Hidden, but writeable on Win 7:
C:\ProgramData\<AppName>\ | true |
2,467,914 | def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename | [
"def",
"_iter_module_files",
"(",
")",
":",
"for",
"module",
"in",
"list",
"(",
"sys",
".",
"modules",
".",
"values",
"(",
")",
")",
":",
"if",
"module",
"is",
"None",
":",
"continue",
"filename",
"=",
"getattr",
"(",
"module",
",",
"'__file__'",
",",
"None",
")",
"if",
"filename",
":",
"old",
"=",
"None",
"while",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"old",
"=",
"filename",
"filename",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"filename",
")",
"if",
"filename",
"==",
"old",
":",
"break",
"else",
":",
"if",
"filename",
"[",
"-",
"4",
":",
"]",
"in",
"(",
"'.pyc'",
",",
"'.pyo'",
")",
":",
"filename",
"=",
"filename",
"[",
":",
"-",
"1",
"]",
"yield",
"filename"
] | python | This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package. | true |
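A compact sketch of the same walk over `sys.modules` for a current interpreter is shown below; it keeps the `.pyc`/`.pyo` adjustment but drops the parent-directory fallback loop.

```python
# Compact sketch of the module-file walk above for a current interpreter.
import os
import sys

def iter_module_files():
    for module in list(sys.modules.values()):
        filename = getattr(module, '__file__', None)
        if not filename:
            continue
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        if os.path.isfile(filename):
            yield filename

print(sum(1 for _ in iter_module_files()), 'watchable files')
```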
2,467,915 | def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with %s' % self.name)
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt' and PY2:
for key, value in iteritems(new_environ):
if isinstance(value, text_type):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code | [
"def",
"restart_with_reloader",
"(",
"self",
")",
":",
"while",
"1",
":",
"_log",
"(",
"'info'",
",",
"' * Restarting with %s'",
"%",
"self",
".",
"name",
")",
"args",
"=",
"[",
"sys",
".",
"executable",
"]",
"+",
"sys",
".",
"argv",
"new_environ",
"=",
"os",
".",
"environ",
".",
"copy",
"(",
")",
"new_environ",
"[",
"'WERKZEUG_RUN_MAIN'",
"]",
"=",
"'true'",
"if",
"os",
".",
"name",
"==",
"'nt'",
"and",
"PY2",
":",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"new_environ",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"text_type",
")",
":",
"new_environ",
"[",
"key",
"]",
"=",
"value",
".",
"encode",
"(",
"'iso-8859-1'",
")",
"exit_code",
"=",
"subprocess",
".",
"call",
"(",
"args",
",",
"env",
"=",
"new_environ",
")",
"if",
"exit_code",
"!=",
"3",
":",
"return",
"exit_code"
] | python | Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread. | true |
2,467,949 | def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
if os.name=='nt':
return get_win_certfile()
else:
for cert_path in cert_paths:
if os.path.isfile(cert_path):
return cert_path
try:
return pkg_resources.resource_filename('certifi', 'cacert.pem')
except (ImportError, ResolutionError, ExtractionError):
return None | [
"def",
"find_ca_bundle",
"(",
")",
":",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"return",
"get_win_certfile",
"(",
")",
"else",
":",
"for",
"cert_path",
"in",
"cert_paths",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cert_path",
")",
":",
"return",
"cert_path",
"try",
":",
"return",
"pkg_resources",
".",
"resource_filename",
"(",
"'certifi'",
",",
"'cacert.pem'",
")",
"except",
"(",
"ImportError",
",",
"ResolutionError",
",",
"ExtractionError",
")",
":",
"return",
"None"
] | python | Return an existing CA bundle path, or None | true |
2,468,083 | def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'wb') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'wb') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file | [
"def",
"make_ssl_devcert",
"(",
"base_path",
",",
"host",
"=",
"None",
",",
"cn",
"=",
"None",
")",
":",
"from",
"OpenSSL",
"import",
"crypto",
"if",
"host",
"is",
"not",
"None",
":",
"cn",
"=",
"'*.%s/CN=%s'",
"%",
"(",
"host",
",",
"host",
")",
"cert",
",",
"pkey",
"=",
"generate_adhoc_ssl_pair",
"(",
"cn",
"=",
"cn",
")",
"cert_file",
"=",
"base_path",
"+",
"'.crt'",
"pkey_file",
"=",
"base_path",
"+",
"'.key'",
"with",
"open",
"(",
"cert_file",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"crypto",
".",
"dump_certificate",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"cert",
")",
")",
"with",
"open",
"(",
"pkey_file",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"crypto",
".",
"dump_privatekey",
"(",
"crypto",
".",
"FILETYPE_PEM",
",",
"pkey",
")",
")",
"return",
"cert_file",
",",
"pkey_file"
] | python | Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use. | true |
2,468,124 | def stylesheet_params(**kwargs):
"""Convert keyword args to a dictionary of stylesheet parameters.
XSL stylesheet parameters must be XPath expressions, i.e.:
* string expressions, like "'5'"
* simple (number) expressions, like "5"
* valid XPath expressions, like "/a/b/text()"
This function converts native Python keyword arguments to stylesheet
parameters following these rules:
If an arg is a string wrap it with XSLT.strparam().
If an arg is an XPath object use its path string.
If arg is None raise TypeError.
Else convert arg to string.
"""
result = {}
for key, val in kwargs.items():
if isinstance(val, basestring):
val = _etree.XSLT.strparam(val)
elif val is None:
raise TypeError('None not allowed as a stylesheet parameter')
elif not isinstance(val, _etree.XPath):
val = unicode(val)
result[key] = val
return result | [
"def",
"stylesheet_params",
"(",
"**",
"kwargs",
")",
":",
"result",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"basestring",
")",
":",
"val",
"=",
"_etree",
".",
"XSLT",
".",
"strparam",
"(",
"val",
")",
"elif",
"val",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"'None not allowed as a stylesheet parameter'",
")",
"elif",
"not",
"isinstance",
"(",
"val",
",",
"_etree",
".",
"XPath",
")",
":",
"val",
"=",
"unicode",
"(",
"val",
")",
"result",
"[",
"key",
"]",
"=",
"val",
"return",
"result"
] | python | Convert keyword args to a dictionary of stylesheet parameters.
XSL stylesheet parameters must be XPath expressions, i.e.:
* string expressions, like "'5'"
* simple (number) expressions, like "5"
* valid XPath expressions, like "/a/b/text()"
This function converts native Python keyword arguments to stylesheet
parameters following these rules:
If an arg is a string wrap it with XSLT.strparam().
If an arg is an XPath object use its path string.
If arg is None raise TypeError.
Else convert arg to string. | true |
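A Python 3 rendering of the row above, assuming lxml is installed: `XSLT.strparam` wraps plain strings so they survive quoting when passed as stylesheet parameters, numbers are passed through as strings, and XPath objects are kept as-is.

```python
# Python 3 sketch of the parameter wrapping above; assumes lxml is installed.
from lxml import etree

def stylesheet_params(**kwargs):
    result = {}
    for key, val in kwargs.items():
        if isinstance(val, str):
            val = etree.XSLT.strparam(val)
        elif val is None:
            raise TypeError('None not allowed as a stylesheet parameter')
        elif not isinstance(val, etree.XPath):
            val = str(val)
        result[key] = val
    return result

params = stylesheet_params(title="O'Reilly", width=5, path=etree.XPath('/a/b'))
print(sorted(params))   # ['path', 'title', 'width']
```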
2,468,125 | def _stylesheet_param_dict(paramsDict, kwargsDict):
"""Return a copy of paramsDict, updated with kwargsDict entries, wrapped as
stylesheet arguments.
kwargsDict entries with a value of None are ignored.
"""
# beware of changing mutable default arg
paramsDict = dict(paramsDict)
for k, v in kwargsDict.items():
if v is not None: # None values do not override
paramsDict[k] = v
paramsDict = stylesheet_params(**paramsDict)
return paramsDict | [
"def",
"_stylesheet_param_dict",
"(",
"paramsDict",
",",
"kwargsDict",
")",
":",
"paramsDict",
"=",
"dict",
"(",
"paramsDict",
")",
"for",
"k",
",",
"v",
"in",
"kwargsDict",
".",
"items",
"(",
")",
":",
"if",
"v",
"is",
"not",
"None",
":",
"paramsDict",
"[",
"k",
"]",
"=",
"v",
"paramsDict",
"=",
"stylesheet_params",
"(",
"**",
"paramsDict",
")",
"return",
"paramsDict"
] | python | Return a copy of paramsDict, updated with kwargsDict entries, wrapped as
stylesheet arguments.
kwargsDict entries with a value of None are ignored. | true |
2,468,355 | def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] | [
"def",
"distros_for_location",
"(",
"location",
",",
"basename",
",",
"metadata",
"=",
"None",
")",
":",
"if",
"basename",
".",
"endswith",
"(",
"'.egg.zip'",
")",
":",
"basename",
"=",
"basename",
"[",
":",
"-",
"4",
"]",
"if",
"basename",
".",
"endswith",
"(",
"'.egg'",
")",
"and",
"'-'",
"in",
"basename",
":",
"return",
"[",
"Distribution",
".",
"from_location",
"(",
"location",
",",
"basename",
",",
"metadata",
")",
"]",
"if",
"basename",
".",
"endswith",
"(",
"'.exe'",
")",
":",
"win_base",
",",
"py_ver",
",",
"platform",
"=",
"parse_bdist_wininst",
"(",
"basename",
")",
"if",
"win_base",
"is",
"not",
"None",
":",
"return",
"interpret_distro_name",
"(",
"location",
",",
"win_base",
",",
"metadata",
",",
"py_ver",
",",
"BINARY_DIST",
",",
"platform",
")",
"for",
"ext",
"in",
"EXTENSIONS",
":",
"if",
"basename",
".",
"endswith",
"(",
"ext",
")",
":",
"basename",
"=",
"basename",
"[",
":",
"-",
"len",
"(",
"ext",
")",
"]",
"return",
"interpret_distro_name",
"(",
"location",
",",
"basename",
",",
"metadata",
")",
"return",
"[",
"]"
] | python | Yield egg or source distribution objects based on basename | true |
2,468,356 | def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
yield urljoin(url, htmldecode(match.group(1))) | [
"def",
"find_external_links",
"(",
"url",
",",
"page",
")",
":",
"for",
"match",
"in",
"REL",
".",
"finditer",
"(",
"page",
")",
":",
"tag",
",",
"rel",
"=",
"match",
".",
"groups",
"(",
")",
"rels",
"=",
"set",
"(",
"map",
"(",
"str",
".",
"strip",
",",
"rel",
".",
"lower",
"(",
")",
".",
"split",
"(",
"','",
")",
")",
")",
"if",
"'homepage'",
"in",
"rels",
"or",
"'download'",
"in",
"rels",
":",
"for",
"match",
"in",
"HREF",
".",
"finditer",
"(",
"tag",
")",
":",
"yield",
"urljoin",
"(",
"url",
",",
"htmldecode",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")",
"for",
"tag",
"in",
"(",
"\"<th>Home Page\"",
",",
"\"<th>Download URL\"",
")",
":",
"pos",
"=",
"page",
".",
"find",
"(",
"tag",
")",
"if",
"pos",
"!=",
"-",
"1",
":",
"match",
"=",
"HREF",
".",
"search",
"(",
"page",
",",
"pos",
")",
"if",
"match",
":",
"yield",
"urljoin",
"(",
"url",
",",
"htmldecode",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
")"
] | python | Find rel="homepage" and rel="download" links in `page`, yielding URLs | true |
2,468,359 | def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
# use the legacy interface for Python 2.3 support
encoded_bytes = base64.encodestring(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n','') | [
"def",
"_encode_auth",
"(",
"auth",
")",
":",
"auth_s",
"=",
"unquote",
"(",
"auth",
")",
"auth_bytes",
"=",
"auth_s",
".",
"encode",
"(",
")",
"encoded_bytes",
"=",
"base64",
".",
"encodestring",
"(",
"auth_bytes",
")",
"encoded",
"=",
"encoded_bytes",
".",
"decode",
"(",
")",
"return",
"encoded",
".",
"replace",
"(",
"'\\n'",
",",
"''",
")"
] | python | A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False | true |
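On Python 3 the same encoding can be written with `b64encode`, which (unlike the legacy `encodestring` interface used above) never inserts newlines; a minimal sketch:

```python
# Python 3 equivalent of the auth encoding above.
import base64
from urllib.parse import unquote

def encode_auth(auth):
    return base64.b64encode(unquote(auth).encode()).decode()

print(encode_auth('username%3Apassword'))   # dXNlcm5hbWU6cGFzc3dvcmQ=
```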
2,468,584 | def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths | [
"def",
"removeduppaths",
"(",
")",
":",
"L",
"=",
"[",
"]",
"known_paths",
"=",
"set",
"(",
")",
"for",
"dir",
"in",
"sys",
".",
"path",
":",
"dir",
",",
"dircase",
"=",
"makepath",
"(",
"dir",
")",
"if",
"not",
"dircase",
"in",
"known_paths",
":",
"L",
".",
"append",
"(",
"dir",
")",
"known_paths",
".",
"add",
"(",
"dircase",
")",
"sys",
".",
"path",
"[",
":",
"]",
"=",
"L",
"return",
"known_paths"
] | python | Remove duplicate entries from sys.path along with making them
absolute | true |
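The core idea in the row, normalize each entry to an absolute path and drop case-insensitive duplicates while preserving order, works for any list of paths; a standalone sketch:

```python
# Standalone sketch of the dedup-and-normalize step used above.
import os

def dedupe_paths(paths):
    seen = set()
    result = []
    for p in paths:
        p = os.path.abspath(p)
        key = os.path.normcase(p)
        if key not in seen:
            seen.add(key)
            result.append(p)
    return result

print(dedupe_paths(['/usr/lib', '/usr/lib/', '/usr/local/lib']))
```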
2,468,586 | def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d | [
"def",
"_init_pathinfo",
"(",
")",
":",
"d",
"=",
"set",
"(",
")",
"for",
"dir",
"in",
"sys",
".",
"path",
":",
"try",
":",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
":",
"dir",
",",
"dircase",
"=",
"makepath",
"(",
"dir",
")",
"d",
".",
"add",
"(",
"dircase",
")",
"except",
"TypeError",
":",
"continue",
"return",
"d"
] | python | Return a set containing all existing directory entries from sys.path | true |
2,468,590 | def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if hasattr(sys, 'flags') and getattr(sys.flags, 'no_user_site', False):
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True | [
"def",
"check_enableusersite",
"(",
")",
":",
"if",
"hasattr",
"(",
"sys",
",",
"'flags'",
")",
"and",
"getattr",
"(",
"sys",
".",
"flags",
",",
"'no_user_site'",
",",
"False",
")",
":",
"return",
"False",
"if",
"hasattr",
"(",
"os",
",",
"\"getuid\"",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"geteuid\"",
")",
":",
"if",
"os",
".",
"geteuid",
"(",
")",
"!=",
"os",
".",
"getuid",
"(",
")",
":",
"return",
"None",
"if",
"hasattr",
"(",
"os",
",",
"\"getgid\"",
")",
"and",
"hasattr",
"(",
"os",
",",
"\"getegid\"",
")",
":",
"if",
"os",
".",
"getegid",
"(",
")",
"!=",
"os",
".",
"getgid",
"(",
")",
":",
"return",
"None",
"return",
"True"
] | python | Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled | true |
2,468,591 | def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data.
"""
global USER_BASE, USER_SITE, ENABLE_USER_SITE
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
#if sys.platform in ('os2emx', 'riscos'):
# # Don't know what to put here
# USER_BASE = ''
# USER_SITE = ''
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser(base, "Python")
USER_SITE = os.path.join(USER_BASE,
"Python" + sys.version[0] + sys.version[2],
"site-packages")
else:
if env_base:
USER_BASE = env_base
else:
USER_BASE = joinuser("~", ".local")
USER_SITE = os.path.join(USER_BASE, "lib",
"python" + sys.version[:3],
"site-packages")
if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
addsitedir(USER_SITE, known_paths)
if ENABLE_USER_SITE:
for dist_libdir in ("lib", "local/lib"):
user_site = os.path.join(USER_BASE, dist_libdir,
"python" + sys.version[:3],
"dist-packages")
if os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths | [
"def",
"addusersitepackages",
"(",
"known_paths",
")",
":",
"global",
"USER_BASE",
",",
"USER_SITE",
",",
"ENABLE_USER_SITE",
"env_base",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"PYTHONUSERBASE\"",
",",
"None",
")",
"def",
"joinuser",
"(",
"*",
"args",
")",
":",
"return",
"os",
".",
"path",
".",
"expanduser",
"(",
"os",
".",
"path",
".",
"join",
"(",
"*",
"args",
")",
")",
"if",
"os",
".",
"name",
"==",
"\"nt\"",
":",
"base",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"\"APPDATA\"",
")",
"or",
"\"~\"",
"if",
"env_base",
":",
"USER_BASE",
"=",
"env_base",
"else",
":",
"USER_BASE",
"=",
"joinuser",
"(",
"base",
",",
"\"Python\"",
")",
"USER_SITE",
"=",
"os",
".",
"path",
".",
"join",
"(",
"USER_BASE",
",",
"\"Python\"",
"+",
"sys",
".",
"version",
"[",
"0",
"]",
"+",
"sys",
".",
"version",
"[",
"2",
"]",
",",
"\"site-packages\"",
")",
"else",
":",
"if",
"env_base",
":",
"USER_BASE",
"=",
"env_base",
"else",
":",
"USER_BASE",
"=",
"joinuser",
"(",
"\"~\"",
",",
"\".local\"",
")",
"USER_SITE",
"=",
"os",
".",
"path",
".",
"join",
"(",
"USER_BASE",
",",
"\"lib\"",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"site-packages\"",
")",
"if",
"ENABLE_USER_SITE",
"and",
"os",
".",
"path",
".",
"isdir",
"(",
"USER_SITE",
")",
":",
"addsitedir",
"(",
"USER_SITE",
",",
"known_paths",
")",
"if",
"ENABLE_USER_SITE",
":",
"for",
"dist_libdir",
"in",
"(",
"\"lib\"",
",",
"\"local/lib\"",
")",
":",
"user_site",
"=",
"os",
".",
"path",
".",
"join",
"(",
"USER_BASE",
",",
"dist_libdir",
",",
"\"python\"",
"+",
"sys",
".",
"version",
"[",
":",
"3",
"]",
",",
"\"dist-packages\"",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"user_site",
")",
":",
"addsitedir",
"(",
"user_site",
",",
"known_paths",
")",
"return",
"known_paths"
] | python | Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
USER_BASE is the root directory for all Python versions
USER_SITE is the user specific site-packages directory
USER_SITE/.. can be used for data. | true |
2,468,592 | def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath) | [
"def",
"setBEGINLIBPATH",
"(",
")",
":",
"dllpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"sys",
".",
"prefix",
",",
"\"Lib\"",
",",
"\"lib-dynload\"",
")",
"libpath",
"=",
"os",
".",
"environ",
"[",
"'BEGINLIBPATH'",
"]",
".",
"split",
"(",
"';'",
")",
"if",
"libpath",
"[",
"-",
"1",
"]",
":",
"libpath",
".",
"append",
"(",
"dllpath",
")",
"else",
":",
"libpath",
"[",
"-",
"1",
"]",
"=",
"dllpath",
"os",
".",
"environ",
"[",
"'BEGINLIBPATH'",
"]",
"=",
"';'",
".",
"join",
"(",
"libpath",
")"
] | python | The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path. | true |
2,468,593 | def setquit():
"""Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
builtins.quit = Quitter('quit')
builtins.exit = Quitter('exit') | [
"def",
"setquit",
"(",
")",
":",
"if",
"os",
".",
"sep",
"==",
"':'",
":",
"eof",
"=",
"'Cmd-Q'",
"elif",
"os",
".",
"sep",
"==",
"'\\\\'",
":",
"eof",
"=",
"'Ctrl-Z plus Return'",
"else",
":",
"eof",
"=",
"'Ctrl-D (i.e. EOF)'",
"class",
"Quitter",
"(",
"object",
")",
":",
"def",
"__init__",
"(",
"self",
",",
"name",
")",
":",
"self",
".",
"name",
"=",
"name",
"def",
"__repr__",
"(",
"self",
")",
":",
"return",
"'Use %s() or %s to exit'",
"%",
"(",
"self",
".",
"name",
",",
"eof",
")",
"def",
"__call__",
"(",
"self",
",",
"code",
"=",
"None",
")",
":",
"try",
":",
"sys",
".",
"stdin",
".",
"close",
"(",
")",
"except",
":",
"pass",
"raise",
"SystemExit",
"(",
"code",
")",
"builtins",
".",
"quit",
"=",
"Quitter",
"(",
"'quit'",
")",
"builtins",
".",
"exit",
"=",
"Quitter",
"(",
"'exit'",
")"
] | python | Define new built-ins 'quit' and 'exit'.
These are simply strings that display a hint on how to exit. | true |
2,468,595 | def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs' | [
"def",
"aliasmbcs",
"(",
")",
":",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"import",
"locale",
",",
"codecs",
"enc",
"=",
"locale",
".",
"getdefaultlocale",
"(",
")",
"[",
"1",
"]",
"if",
"enc",
".",
"startswith",
"(",
"'cp'",
")",
":",
"try",
":",
"codecs",
".",
"lookup",
"(",
"enc",
")",
"except",
"LookupError",
":",
"import",
"encodings",
"encodings",
".",
"_cache",
"[",
"enc",
"]",
"=",
"encodings",
".",
"_unknown",
"encodings",
".",
"aliases",
".",
"aliases",
"[",
"enc",
"]",
"=",
"'mbcs'"
] | python | On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case. | true |
2,468,596 | def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) | [
"def",
"setencoding",
"(",
")",
":",
"encoding",
"=",
"\"ascii\"",
"if",
"0",
":",
"import",
"locale",
"loc",
"=",
"locale",
".",
"getdefaultlocale",
"(",
")",
"if",
"loc",
"[",
"1",
"]",
":",
"encoding",
"=",
"loc",
"[",
"1",
"]",
"if",
"0",
":",
"encoding",
"=",
"\"undefined\"",
"if",
"encoding",
"!=",
"\"ascii\"",
":",
"sys",
".",
"setdefaultencoding",
"(",
"encoding",
")"
] | python | Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this. | true |
2,468,597 | def force_global_eggs_after_local_site_packages():
"""
Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around.
"""
egginsert = getattr(sys, '__egginsert', 0)
for i, path in enumerate(sys.path):
if i > egginsert and path.startswith(sys.prefix):
egginsert = i
sys.__egginsert = egginsert + 1 | [
"def",
"force_global_eggs_after_local_site_packages",
"(",
")",
":",
"egginsert",
"=",
"getattr",
"(",
"sys",
",",
"'__egginsert'",
",",
"0",
")",
"for",
"i",
",",
"path",
"in",
"enumerate",
"(",
"sys",
".",
"path",
")",
":",
"if",
"i",
">",
"egginsert",
"and",
"path",
".",
"startswith",
"(",
"sys",
".",
"prefix",
")",
":",
"egginsert",
"=",
"i",
"sys",
".",
"__egginsert",
"=",
"egginsert",
"+",
"1"
] | python | Force easy_installed eggs in the global environment to get placed
in sys.path after all packages inside the virtualenv. This
maintains the "least surprise" result that packages in the
virtualenv always mask global packages, never the other way
around. | true |
2,468,598 | def fixclasspath():
"""Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories.
"""
paths = []
classpaths = []
for path in sys.path:
if path == '__classpath__' or path.startswith('__pyclasspath__'):
classpaths.append(path)
else:
paths.append(path)
sys.path = paths
sys.path.extend(classpaths) | [
"def",
"fixclasspath",
"(",
")",
":",
"paths",
"=",
"[",
"]",
"classpaths",
"=",
"[",
"]",
"for",
"path",
"in",
"sys",
".",
"path",
":",
"if",
"path",
"==",
"'__classpath__'",
"or",
"path",
".",
"startswith",
"(",
"'__pyclasspath__'",
")",
":",
"classpaths",
".",
"append",
"(",
"path",
")",
"else",
":",
"paths",
".",
"append",
"(",
"path",
")",
"sys",
".",
"path",
"=",
"paths",
"sys",
".",
"path",
".",
"extend",
"(",
"classpaths",
")"
] | python | Adjust the special classpath sys.path entries for Jython. These
entries should follow the base virtualenv lib directories. | true |
2,468,717 | def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),
(60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),
(60 * 60 * 24 * 7, lambda n : ungettext('week', 'weeks', n)),
(60 * 60 * 24, lambda n : ungettext('day', 'days', n)),
(60 * 60, lambda n: ungettext('hour', 'hours', n)),
(60, lambda n: ungettext('minute', 'minutes', n))
)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
if d.tzinfo:
now = datetime.datetime.now(LocalTimezone(d))
else:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'0 ' + ugettext('minutes')
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
s = ugettext('%(number)d %(type)s') % {'number': count, 'type': name(count)}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
s += ugettext(', %(number)d %(type)s') % {'number': count2, 'type': name2(count2)}
return s | [
"def",
"timesince",
"(",
"d",
",",
"now",
"=",
"None",
")",
":",
"chunks",
"=",
"(",
"(",
"60",
"*",
"60",
"*",
"24",
"*",
"365",
",",
"lambda",
"n",
":",
"ungettext",
"(",
"'year'",
",",
"'years'",
",",
"n",
")",
")",
",",
"(",
"60",
"*",
"60",
"*",
"24",
"*",
"30",
",",
"lambda",
"n",
":",
"ungettext",
"(",
"'month'",
",",
"'months'",
",",
"n",
")",
")",
",",
"(",
"60",
"*",
"60",
"*",
"24",
"*",
"7",
",",
"lambda",
"n",
":",
"ungettext",
"(",
"'week'",
",",
"'weeks'",
",",
"n",
")",
")",
",",
"(",
"60",
"*",
"60",
"*",
"24",
",",
"lambda",
"n",
":",
"ungettext",
"(",
"'day'",
",",
"'days'",
",",
"n",
")",
")",
",",
"(",
"60",
"*",
"60",
",",
"lambda",
"n",
":",
"ungettext",
"(",
"'hour'",
",",
"'hours'",
",",
"n",
")",
")",
",",
"(",
"60",
",",
"lambda",
"n",
":",
"ungettext",
"(",
"'minute'",
",",
"'minutes'",
",",
"n",
")",
")",
")",
"if",
"not",
"isinstance",
"(",
"d",
",",
"datetime",
".",
"datetime",
")",
":",
"d",
"=",
"datetime",
".",
"datetime",
"(",
"d",
".",
"year",
",",
"d",
".",
"month",
",",
"d",
".",
"day",
")",
"if",
"now",
"and",
"not",
"isinstance",
"(",
"now",
",",
"datetime",
".",
"datetime",
")",
":",
"now",
"=",
"datetime",
".",
"datetime",
"(",
"now",
".",
"year",
",",
"now",
".",
"month",
",",
"now",
".",
"day",
")",
"if",
"not",
"now",
":",
"if",
"d",
".",
"tzinfo",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
"LocalTimezone",
"(",
"d",
")",
")",
"else",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"delta",
"=",
"now",
"-",
"(",
"d",
"-",
"datetime",
".",
"timedelta",
"(",
"0",
",",
"0",
",",
"d",
".",
"microsecond",
")",
")",
"since",
"=",
"delta",
".",
"days",
"*",
"24",
"*",
"60",
"*",
"60",
"+",
"delta",
".",
"seconds",
"if",
"since",
"<=",
"0",
":",
"return",
"u'0 '",
"+",
"ugettext",
"(",
"'minutes'",
")",
"for",
"i",
",",
"(",
"seconds",
",",
"name",
")",
"in",
"enumerate",
"(",
"chunks",
")",
":",
"count",
"=",
"since",
"//",
"seconds",
"if",
"count",
"!=",
"0",
":",
"break",
"s",
"=",
"ugettext",
"(",
"'%(number)d %(type)s'",
")",
"%",
"{",
"'number'",
":",
"count",
",",
"'type'",
":",
"name",
"(",
"count",
")",
"}",
"if",
"i",
"+",
"1",
"<",
"len",
"(",
"chunks",
")",
":",
"seconds2",
",",
"name2",
"=",
"chunks",
"[",
"i",
"+",
"1",
"]",
"count2",
"=",
"(",
"since",
"-",
"(",
"seconds",
"*",
"count",
")",
")",
"//",
"seconds2",
"if",
"count2",
"!=",
"0",
":",
"s",
"+=",
"ugettext",
"(",
"', %(number)d %(type)s'",
")",
"%",
"{",
"'number'",
":",
"count2",
",",
"'type'",
":",
"name2",
"(",
"count2",
")",
"}",
"return",
"s"
] | python | Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since | true |
2,468,737 | def have_pyrex():
"""
Return True if Cython or Pyrex can be imported.
"""
pyrex_impls = 'Cython.Distutils.build_ext', 'Pyrex.Distutils.build_ext'
for pyrex_impl in pyrex_impls:
try:
# from (pyrex_impl) import build_ext
__import__(pyrex_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False | [
"def",
"have_pyrex",
"(",
")",
":",
"pyrex_impls",
"=",
"'Cython.Distutils.build_ext'",
",",
"'Pyrex.Distutils.build_ext'",
"for",
"pyrex_impl",
"in",
"pyrex_impls",
":",
"try",
":",
"__import__",
"(",
"pyrex_impl",
",",
"fromlist",
"=",
"[",
"'build_ext'",
"]",
")",
".",
"build_ext",
"return",
"True",
"except",
"Exception",
":",
"pass",
"return",
"False"
] | python | Return True if Cython or Pyrex can be imported. | true |
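A sketch of how a helper like have_pyrex is typically consulted in a setup.py; the package and file names here are illustrative:

from setuptools import setup, Extension

# Prefer the .pyx source when Cython/Pyrex can compile it, otherwise fall
# back to a pre-generated C file shipped with the sdist.
source = 'speedups.pyx' if have_pyrex() else 'speedups.c'
setup(
    name='example',
    ext_modules=[Extension('example.speedups', [source])],
)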
2,468,948 | def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
distro = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], platform.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], platform.libc_ver()),
))
if libc:
distro["libc"] = libc
if distro:
data["distro"] = distro
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
) | [
"def",
"user_agent",
"(",
")",
":",
"data",
"=",
"{",
"\"installer\"",
":",
"{",
"\"name\"",
":",
"\"pip\"",
",",
"\"version\"",
":",
"pip",
".",
"__version__",
"}",
",",
"\"python\"",
":",
"platform",
".",
"python_version",
"(",
")",
",",
"\"implementation\"",
":",
"{",
"\"name\"",
":",
"platform",
".",
"python_implementation",
"(",
")",
",",
"}",
",",
"}",
"if",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"name\"",
"]",
"==",
"'CPython'",
":",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"version\"",
"]",
"=",
"platform",
".",
"python_version",
"(",
")",
"elif",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"name\"",
"]",
"==",
"'PyPy'",
":",
"if",
"sys",
".",
"pypy_version_info",
".",
"releaselevel",
"==",
"'final'",
":",
"pypy_version_info",
"=",
"sys",
".",
"pypy_version_info",
"[",
":",
"3",
"]",
"else",
":",
"pypy_version_info",
"=",
"sys",
".",
"pypy_version_info",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"version\"",
"]",
"=",
"\".\"",
".",
"join",
"(",
"[",
"str",
"(",
"x",
")",
"for",
"x",
"in",
"pypy_version_info",
"]",
")",
"elif",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"name\"",
"]",
"==",
"'Jython'",
":",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"version\"",
"]",
"=",
"platform",
".",
"python_version",
"(",
")",
"elif",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"name\"",
"]",
"==",
"'IronPython'",
":",
"data",
"[",
"\"implementation\"",
"]",
"[",
"\"version\"",
"]",
"=",
"platform",
".",
"python_version",
"(",
")",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"linux\"",
")",
":",
"distro",
"=",
"dict",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"zip",
"(",
"[",
"\"name\"",
",",
"\"version\"",
",",
"\"id\"",
"]",
",",
"platform",
".",
"linux_distribution",
"(",
")",
")",
",",
")",
")",
"libc",
"=",
"dict",
"(",
"filter",
"(",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
",",
"zip",
"(",
"[",
"\"lib\"",
",",
"\"version\"",
"]",
",",
"platform",
".",
"libc_ver",
"(",
")",
")",
",",
")",
")",
"if",
"libc",
":",
"distro",
"[",
"\"libc\"",
"]",
"=",
"libc",
"if",
"distro",
":",
"data",
"[",
"\"distro\"",
"]",
"=",
"distro",
"if",
"sys",
".",
"platform",
".",
"startswith",
"(",
"\"darwin\"",
")",
"and",
"platform",
".",
"mac_ver",
"(",
")",
"[",
"0",
"]",
":",
"data",
"[",
"\"distro\"",
"]",
"=",
"{",
"\"name\"",
":",
"\"OS X\"",
",",
"\"version\"",
":",
"platform",
".",
"mac_ver",
"(",
")",
"[",
"0",
"]",
"}",
"if",
"platform",
".",
"system",
"(",
")",
":",
"data",
".",
"setdefault",
"(",
"\"system\"",
",",
"{",
"}",
")",
"[",
"\"name\"",
"]",
"=",
"platform",
".",
"system",
"(",
")",
"if",
"platform",
".",
"release",
"(",
")",
":",
"data",
".",
"setdefault",
"(",
"\"system\"",
",",
"{",
"}",
")",
"[",
"\"release\"",
"]",
"=",
"platform",
".",
"release",
"(",
")",
"if",
"platform",
".",
"machine",
"(",
")",
":",
"data",
"[",
"\"cpu\"",
"]",
"=",
"platform",
".",
"machine",
"(",
")",
"return",
"\"{data[installer][name]}/{data[installer][version]} {json}\"",
".",
"format",
"(",
"data",
"=",
"data",
",",
"json",
"=",
"json",
".",
"dumps",
"(",
"data",
",",
"separators",
"=",
"(",
"\",\"",
",",
"\":\"",
")",
",",
"sort_keys",
"=",
"True",
")",
",",
")"
] | python | Return a string representing the user agent. | true |
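A hedged sketch of attaching the generated string to outgoing HTTP requests; the session wiring and the sample value are illustrative only:

import requests

session = requests.Session()
session.headers['User-Agent'] = user_agent()
# The value looks roughly like:
# pip/8.1.2 {"cpu":"x86_64","implementation":{"name":"CPython","version":"3.5.1"},...}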
2,468,949 | def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
with open(url) as f:
content = f.read()
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content | [
"def",
"get_file_content",
"(",
"url",
",",
"comes_from",
"=",
"None",
",",
"session",
"=",
"None",
")",
":",
"if",
"session",
"is",
"None",
":",
"raise",
"TypeError",
"(",
"\"get_file_content() missing 1 required keyword argument: 'session'\"",
")",
"match",
"=",
"_scheme_re",
".",
"search",
"(",
"url",
")",
"if",
"match",
":",
"scheme",
"=",
"match",
".",
"group",
"(",
"1",
")",
".",
"lower",
"(",
")",
"if",
"(",
"scheme",
"==",
"'file'",
"and",
"comes_from",
"and",
"comes_from",
".",
"startswith",
"(",
"'http'",
")",
")",
":",
"raise",
"InstallationError",
"(",
"'Requirements file %s references URL %s, which is local'",
"%",
"(",
"comes_from",
",",
"url",
")",
")",
"if",
"scheme",
"==",
"'file'",
":",
"path",
"=",
"url",
".",
"split",
"(",
"':'",
",",
"1",
")",
"[",
"1",
"]",
"path",
"=",
"path",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"match",
"=",
"_url_slash_drive_re",
".",
"match",
"(",
"path",
")",
"if",
"match",
":",
"path",
"=",
"match",
".",
"group",
"(",
"1",
")",
"+",
"':'",
"+",
"path",
".",
"split",
"(",
"'|'",
",",
"1",
")",
"[",
"1",
"]",
"path",
"=",
"urllib_parse",
".",
"unquote",
"(",
"path",
")",
"if",
"path",
".",
"startswith",
"(",
"'/'",
")",
":",
"path",
"=",
"'/'",
"+",
"path",
".",
"lstrip",
"(",
"'/'",
")",
"url",
"=",
"path",
"else",
":",
"resp",
"=",
"session",
".",
"get",
"(",
"url",
")",
"resp",
".",
"raise_for_status",
"(",
")",
"if",
"six",
".",
"PY3",
":",
"return",
"resp",
".",
"url",
",",
"resp",
".",
"text",
"else",
":",
"return",
"resp",
".",
"url",
",",
"resp",
".",
"content",
"try",
":",
"with",
"open",
"(",
"url",
")",
"as",
"f",
":",
"content",
"=",
"f",
".",
"read",
"(",
")",
"except",
"IOError",
"as",
"exc",
":",
"raise",
"InstallationError",
"(",
"'Could not open requirements file: %s'",
"%",
"str",
"(",
"exc",
")",
")",
"return",
"url",
",",
"content"
] | python | Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode. | true |
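A minimal call sketch; any object with a requests-style .get() method can serve as the required session argument:

import requests

session = requests.Session()
# Local file: read from disk, returns (path, text).
path, text = get_file_content('requirements.txt', session=session)
# Remote file: fetched through the session; non-2xx responses raise.
url, text = get_file_content('https://example.com/requirements.txt', session=session)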
2,469,086 | def print_results(distributions, list_all_files):
"""
    Print the information from the installed distributions found.
"""
results_printed = False
for dist in distributions:
results_printed = True
logger.info("---")
logger.info("Metadata-Version: %s" % dist.get('metadata-version'))
logger.info("Name: %s" % dist['name'])
logger.info("Version: %s" % dist['version'])
logger.info("Summary: %s" % dist.get('summary'))
logger.info("Home-page: %s" % dist.get('home-page'))
logger.info("Author: %s" % dist.get('author'))
logger.info("Author-email: %s" % dist.get('author-email'))
logger.info("License: %s" % dist.get('license'))
logger.info("Location: %s" % dist['location'])
logger.info("Requires: %s" % ', '.join(dist['requires']))
if list_all_files:
logger.info("Files:")
if dist['files'] is not None:
for line in dist['files']:
logger.info(" %s" % line.strip())
else:
logger.info("Cannot locate installed-files.txt")
if 'entry_points' in dist:
logger.info("Entry-points:")
for line in dist['entry_points']:
logger.info(" %s" % line.strip())
return results_printed | [
"def",
"print_results",
"(",
"distributions",
",",
"list_all_files",
")",
":",
"results_printed",
"=",
"False",
"for",
"dist",
"in",
"distributions",
":",
"results_printed",
"=",
"True",
"logger",
".",
"info",
"(",
"\"---\"",
")",
"logger",
".",
"info",
"(",
"\"Metadata-Version: %s\"",
"%",
"dist",
".",
"get",
"(",
"'metadata-version'",
")",
")",
"logger",
".",
"info",
"(",
"\"Name: %s\"",
"%",
"dist",
"[",
"'name'",
"]",
")",
"logger",
".",
"info",
"(",
"\"Version: %s\"",
"%",
"dist",
"[",
"'version'",
"]",
")",
"logger",
".",
"info",
"(",
"\"Summary: %s\"",
"%",
"dist",
".",
"get",
"(",
"'summary'",
")",
")",
"logger",
".",
"info",
"(",
"\"Home-page: %s\"",
"%",
"dist",
".",
"get",
"(",
"'home-page'",
")",
")",
"logger",
".",
"info",
"(",
"\"Author: %s\"",
"%",
"dist",
".",
"get",
"(",
"'author'",
")",
")",
"logger",
".",
"info",
"(",
"\"Author-email: %s\"",
"%",
"dist",
".",
"get",
"(",
"'author-email'",
")",
")",
"logger",
".",
"info",
"(",
"\"License: %s\"",
"%",
"dist",
".",
"get",
"(",
"'license'",
")",
")",
"logger",
".",
"info",
"(",
"\"Location: %s\"",
"%",
"dist",
"[",
"'location'",
"]",
")",
"logger",
".",
"info",
"(",
"\"Requires: %s\"",
"%",
"', '",
".",
"join",
"(",
"dist",
"[",
"'requires'",
"]",
")",
")",
"if",
"list_all_files",
":",
"logger",
".",
"info",
"(",
"\"Files:\"",
")",
"if",
"dist",
"[",
"'files'",
"]",
"is",
"not",
"None",
":",
"for",
"line",
"in",
"dist",
"[",
"'files'",
"]",
":",
"logger",
".",
"info",
"(",
"\" %s\"",
"%",
"line",
".",
"strip",
"(",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"\"Cannot locate installed-files.txt\"",
")",
"if",
"'entry_points'",
"in",
"dist",
":",
"logger",
".",
"info",
"(",
"\"Entry-points:\"",
")",
"for",
"line",
"in",
"dist",
"[",
"'entry_points'",
"]",
":",
"logger",
".",
"info",
"(",
"\" %s\"",
"%",
"line",
".",
"strip",
"(",
")",
")",
"return",
"results_printed"
] | python | Print the information from the installed distributions found. | true |
2,469,112 | def _default_template_ctx_processor():
"""Default template context processor. Injects `request`,
`session` and `g`.
"""
reqctx = _request_ctx_stack.top
appctx = _app_ctx_stack.top
rv = {}
if appctx is not None:
rv['g'] = appctx.g
if reqctx is not None:
rv['request'] = reqctx.request
rv['session'] = reqctx.session
return rv | [
"def",
"_default_template_ctx_processor",
"(",
")",
":",
"reqctx",
"=",
"_request_ctx_stack",
".",
"top",
"appctx",
"=",
"_app_ctx_stack",
".",
"top",
"rv",
"=",
"{",
"}",
"if",
"appctx",
"is",
"not",
"None",
":",
"rv",
"[",
"'g'",
"]",
"=",
"appctx",
".",
"g",
"if",
"reqctx",
"is",
"not",
"None",
":",
"rv",
"[",
"'request'",
"]",
"=",
"reqctx",
".",
"request",
"rv",
"[",
"'session'",
"]",
"=",
"reqctx",
".",
"session",
"return",
"rv"
] | python | Default template context processor. Injects `request`,
`session` and `g`. | true |
2,469,115 | def render_template_string(source, **context):
"""Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(ctx.app.jinja_env.from_string(source),
context, ctx.app) | [
"def",
"render_template_string",
"(",
"source",
",",
"**",
"context",
")",
":",
"ctx",
"=",
"_app_ctx_stack",
".",
"top",
"ctx",
".",
"app",
".",
"update_template_context",
"(",
"context",
")",
"return",
"_render",
"(",
"ctx",
".",
"app",
".",
"jinja_env",
".",
"from_string",
"(",
"source",
")",
",",
"context",
",",
"ctx",
".",
"app",
")"
] | python | Renders a template from the given template source string
with the given context.
:param source: the sourcecode of the template to be
rendered
:param context: the variables that should be available in the
context of the template. | true |
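A small Flask usage sketch; the route and template text are illustrative:

from flask import Flask, render_template_string

app = Flask(__name__)

@app.route('/hello/<name>')
def hello(name):
    # The inline template sees the same context processors as file templates.
    return render_template_string('Hello, {{ name }}!', name=name)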
2,469,135 | def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta)) | [
"def",
"find_meta",
"(",
"meta",
")",
":",
"meta_match",
"=",
"re",
".",
"search",
"(",
"r\"^__{meta}__ = ['\\\"]([^'\\\"]*)['\\\"]\"",
".",
"format",
"(",
"meta",
"=",
"meta",
")",
",",
"META_FILE",
",",
"re",
".",
"M",
")",
"if",
"meta_match",
":",
"return",
"meta_match",
".",
"group",
"(",
"1",
")",
"raise",
"RuntimeError",
"(",
"\"Unable to find __{meta}__ string.\"",
".",
"format",
"(",
"meta",
"=",
"meta",
")",
")"
] | python | Extract __*meta*__ from META_FILE. | true |
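A sketch of the convention this helper relies on; META_FILE normally holds the text of the package's __init__.py and is faked inline here, in the same module as find_meta:

import re

META_FILE = '__version__ = "1.2.3"\n__author__ = "Jane Doe"\n'

print(find_meta("version"))  # -> "1.2.3"
print(find_meta("author"))   # -> "Jane Doe"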
2,469,844 | def make_attrgetter(environment, attribute):
"""Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers.
"""
if not isinstance(attribute, string_types) \
or ('.' not in attribute and not attribute.isdigit()):
return lambda x: environment.getitem(x, attribute)
attribute = attribute.split('.')
def attrgetter(item):
for part in attribute:
if part.isdigit():
part = int(part)
item = environment.getitem(item, part)
return item
return attrgetter | [
"def",
"make_attrgetter",
"(",
"environment",
",",
"attribute",
")",
":",
"if",
"not",
"isinstance",
"(",
"attribute",
",",
"string_types",
")",
"or",
"(",
"'.'",
"not",
"in",
"attribute",
"and",
"not",
"attribute",
".",
"isdigit",
"(",
")",
")",
":",
"return",
"lambda",
"x",
":",
"environment",
".",
"getitem",
"(",
"x",
",",
"attribute",
")",
"attribute",
"=",
"attribute",
".",
"split",
"(",
"'.'",
")",
"def",
"attrgetter",
"(",
"item",
")",
":",
"for",
"part",
"in",
"attribute",
":",
"if",
"part",
".",
"isdigit",
"(",
")",
":",
"part",
"=",
"int",
"(",
"part",
")",
"item",
"=",
"environment",
".",
"getitem",
"(",
"item",
",",
"part",
")",
"return",
"item",
"return",
"attrgetter"
] | python | Returns a callable that looks up the given attribute from a
passed object with the rules of the environment. Dots are allowed
to access attributes of attributes. Integer parts in paths are
looked up as integers. | true |
2,469,845 | def do_title(s):
"""Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase.
"""
rv = []
for item in re.compile(r'([-\s]+)(?u)').split(s):
if not item:
continue
rv.append(item[0].upper() + item[1:].lower())
return ''.join(rv) | [
"def",
"do_title",
"(",
"s",
")",
":",
"rv",
"=",
"[",
"]",
"for",
"item",
"in",
"re",
".",
"compile",
"(",
"r'([-\\s]+)(?u)'",
")",
".",
"split",
"(",
"s",
")",
":",
"if",
"not",
"item",
":",
"continue",
"rv",
".",
"append",
"(",
"item",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"item",
"[",
"1",
":",
"]",
".",
"lower",
"(",
")",
")",
"return",
"''",
".",
"join",
"(",
"rv",
")"
] | python | Return a titlecased version of the value. I.e. words will start with
uppercase letters, all remaining characters are lowercase. | true |
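A couple of direct calls showing the behaviour:

print(do_title("my first post"))  # -> "My First Post"
print(do_title("foo-bar baz"))    # -> "Foo-Bar Baz"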
2,469,846 | def do_dictsort(value, case_sensitive=False, by='key'):
"""Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by key, case insensitive, sorted
normally and ordered by value.
"""
if by == 'key':
pos = 0
elif by == 'value':
pos = 1
else:
raise FilterArgumentError('You can only sort by either '
'"key" or "value"')
def sort_func(item):
value = item[pos]
if isinstance(value, string_types) and not case_sensitive:
value = value.lower()
return value
return sorted(value.items(), key=sort_func) | [
"def",
"do_dictsort",
"(",
"value",
",",
"case_sensitive",
"=",
"False",
",",
"by",
"=",
"'key'",
")",
":",
"if",
"by",
"==",
"'key'",
":",
"pos",
"=",
"0",
"elif",
"by",
"==",
"'value'",
":",
"pos",
"=",
"1",
"else",
":",
"raise",
"FilterArgumentError",
"(",
"'You can only sort by either '",
"'\"key\" or \"value\"'",
")",
"def",
"sort_func",
"(",
"item",
")",
":",
"value",
"=",
"item",
"[",
"pos",
"]",
"if",
"isinstance",
"(",
"value",
",",
"string_types",
")",
"and",
"not",
"case_sensitive",
":",
"value",
"=",
"value",
".",
"lower",
"(",
")",
"return",
"value",
"return",
"sorted",
"(",
"value",
".",
"items",
"(",
")",
",",
"key",
"=",
"sort_func",
")"
] | python | Sort a dict and yield (key, value) pairs. Because python dicts are
unsorted you may want to use this function to order them by either
key or value:
.. sourcecode:: jinja
{% for item in mydict|dictsort %}
sort the dict by key, case insensitive
{% for item in mydict|dictsort(true) %}
sort the dict by key, case sensitive
{% for item in mydict|dictsort(false, 'value') %}
sort the dict by key, case insensitive, sorted
normally and ordered by value. | true |
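Direct calls illustrating the two sort keys:

prices = {'pear': 3, 'apple': 5, 'Banana': 1}
print(do_dictsort(prices))              # [('apple', 5), ('Banana', 1), ('pear', 3)]
print(do_dictsort(prices, by='value'))  # [('Banana', 1), ('pear', 3), ('apple', 5)]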
2,469,847 | def do_sort(environment, value, reverse=False, case_sensitive=False,
attribute=None):
"""Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
It is also possible to sort by an attribute (for example to sort
by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
{% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
.. versionchanged:: 2.6
The `attribute` parameter was added.
"""
if not case_sensitive:
def sort_func(item):
if isinstance(item, string_types):
item = item.lower()
return item
else:
sort_func = None
if attribute is not None:
getter = make_attrgetter(environment, attribute)
def sort_func(item, processor=sort_func or (lambda x: x)):
return processor(getter(item))
return sorted(value, key=sort_func, reverse=reverse) | [
"def",
"do_sort",
"(",
"environment",
",",
"value",
",",
"reverse",
"=",
"False",
",",
"case_sensitive",
"=",
"False",
",",
"attribute",
"=",
"None",
")",
":",
"if",
"not",
"case_sensitive",
":",
"def",
"sort_func",
"(",
"item",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"string_types",
")",
":",
"item",
"=",
"item",
".",
"lower",
"(",
")",
"return",
"item",
"else",
":",
"sort_func",
"=",
"None",
"if",
"attribute",
"is",
"not",
"None",
":",
"getter",
"=",
"make_attrgetter",
"(",
"environment",
",",
"attribute",
")",
"def",
"sort_func",
"(",
"item",
",",
"processor",
"=",
"sort_func",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
")",
":",
"return",
"processor",
"(",
"getter",
"(",
"item",
")",
")",
"return",
"sorted",
"(",
"value",
",",
"key",
"=",
"sort_func",
",",
"reverse",
"=",
"reverse",
")"
] | python | Sort an iterable. Per default it sorts ascending, if you pass it
true as first argument it will reverse the sorting.
If the iterable is made of strings the third parameter can be used to
control the case sensitiveness of the comparison which is disabled by
default.
.. sourcecode:: jinja
{% for item in iterable|sort %}
...
{% endfor %}
It is also possible to sort by an attribute (for example to sort
by the date of an object) by specifying the `attribute` parameter:
.. sourcecode:: jinja
{% for item in iterable|sort(attribute='date') %}
...
{% endfor %}
.. versionchanged:: 2.6
The `attribute` parameter was added. | true |
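Because do_sort is an environment filter, it is easiest to exercise through a template; a sketch assuming a stock Jinja2 install:

from jinja2 import Environment

env = Environment()
tmpl = env.from_string("{{ names|sort|join(', ') }}")
print(tmpl.render(names=['banana', 'Apple', 'cherry']))
# -> "Apple, banana, cherry"  (case-insensitive by default)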
2,469,849 | def do_map(*args, **kwargs):
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
.. versionadded:: 2.7
"""
context = args[0]
seq = args[1]
if len(args) == 2 and 'attribute' in kwargs:
attribute = kwargs.pop('attribute')
if kwargs:
raise FilterArgumentError('Unexpected keyword argument %r' %
next(iter(kwargs)))
func = make_attrgetter(context.environment, attribute)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
raise FilterArgumentError('map requires a filter argument')
func = lambda item: context.environment.call_filter(
name, item, args, kwargs, context=context)
if seq:
for item in seq:
yield func(item) | [
"def",
"do_map",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"context",
"=",
"args",
"[",
"0",
"]",
"seq",
"=",
"args",
"[",
"1",
"]",
"if",
"len",
"(",
"args",
")",
"==",
"2",
"and",
"'attribute'",
"in",
"kwargs",
":",
"attribute",
"=",
"kwargs",
".",
"pop",
"(",
"'attribute'",
")",
"if",
"kwargs",
":",
"raise",
"FilterArgumentError",
"(",
"'Unexpected keyword argument %r'",
"%",
"next",
"(",
"iter",
"(",
"kwargs",
")",
")",
")",
"func",
"=",
"make_attrgetter",
"(",
"context",
".",
"environment",
",",
"attribute",
")",
"else",
":",
"try",
":",
"name",
"=",
"args",
"[",
"2",
"]",
"args",
"=",
"args",
"[",
"3",
":",
"]",
"except",
"LookupError",
":",
"raise",
"FilterArgumentError",
"(",
"'map requires a filter argument'",
")",
"func",
"=",
"lambda",
"item",
":",
"context",
".",
"environment",
".",
"call_filter",
"(",
"name",
",",
"item",
",",
"args",
",",
"kwargs",
",",
"context",
"=",
"context",
")",
"if",
"seq",
":",
"for",
"item",
"in",
"seq",
":",
"yield",
"func",
"(",
"item",
")"
] | python | Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
.. versionadded:: 2.7 | true |
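A template-level sketch of the attribute form, again assuming a stock Jinja2 install:

from jinja2 import Environment

env = Environment()
tmpl = env.from_string("{{ users|map(attribute='username')|join(', ') }}")
print(tmpl.render(users=[{'username': 'alice'}, {'username': 'bob'}]))
# -> "alice, bob"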
2,469,861 | def create_logger(app):
"""Creates a logger for the given application. This logger works
similar to a regular Python logger but changes the effective logging
level based on the application's debug flag. Furthermore this
function also removes all attached handlers in case there was a
logger with the log name before.
"""
Logger = getLoggerClass()
class DebugLogger(Logger):
def getEffectiveLevel(x):
if x.level == 0 and app.debug:
return DEBUG
return Logger.getEffectiveLevel(x)
class DebugHandler(StreamHandler):
def emit(x, record):
StreamHandler.emit(x, record) if app.debug else None
handler = DebugHandler()
handler.setLevel(DEBUG)
handler.setFormatter(Formatter(app.debug_log_format))
logger = getLogger(app.logger_name)
# just in case that was not a new logger, get rid of all the handlers
# already attached to it.
del logger.handlers[:]
logger.__class__ = DebugLogger
logger.addHandler(handler)
return logger | [
"def",
"create_logger",
"(",
"app",
")",
":",
"Logger",
"=",
"getLoggerClass",
"(",
")",
"class",
"DebugLogger",
"(",
"Logger",
")",
":",
"def",
"getEffectiveLevel",
"(",
"x",
")",
":",
"if",
"x",
".",
"level",
"==",
"0",
"and",
"app",
".",
"debug",
":",
"return",
"DEBUG",
"return",
"Logger",
".",
"getEffectiveLevel",
"(",
"x",
")",
"class",
"DebugHandler",
"(",
"StreamHandler",
")",
":",
"def",
"emit",
"(",
"x",
",",
"record",
")",
":",
"StreamHandler",
".",
"emit",
"(",
"x",
",",
"record",
")",
"if",
"app",
".",
"debug",
"else",
"None",
"handler",
"=",
"DebugHandler",
"(",
")",
"handler",
".",
"setLevel",
"(",
"DEBUG",
")",
"handler",
".",
"setFormatter",
"(",
"Formatter",
"(",
"app",
".",
"debug_log_format",
")",
")",
"logger",
"=",
"getLogger",
"(",
"app",
".",
"logger_name",
")",
"del",
"logger",
".",
"handlers",
"[",
":",
"]",
"logger",
".",
"__class__",
"=",
"DebugLogger",
"logger",
".",
"addHandler",
"(",
"handler",
")",
"return",
"logger"
] | python | Creates a logger for the given application. This logger works
similar to a regular Python logger but changes the effective logging
level based on the application's debug flag. Furthermore this
function also removes all attached handlers in case there was a
logger with the log name before. | true |
2,470,860 | def make_log_record_output(category, level, message,
format=None, datefmt=None, **kwargs):
"""
    Create the output for a log record, as performed by the :mod:`logging` module.
:param category: Name of the logger (as string or None).
:param level: Log level (as number).
:param message: Log message to use.
:returns: Log record output (as string)
"""
if not category or (category == "__ROOT__"):
category = "root"
levelname = logging.getLevelName(level)
record_data = dict(name=category, levelname=levelname, msg=message)
record_data.update(kwargs)
record = logging.makeLogRecord(record_data)
formatter = logging.Formatter(format, datefmt=datefmt)
return formatter.format(record) | [
"def",
"make_log_record_output",
"(",
"category",
",",
"level",
",",
"message",
",",
"format",
"=",
"None",
",",
"datefmt",
"=",
"None",
",",
"**",
"kwargs",
")",
":",
"if",
"not",
"category",
"or",
"(",
"category",
"==",
"\"__ROOT__\"",
")",
":",
"category",
"=",
"\"root\"",
"levelname",
"=",
"logging",
".",
"getLevelName",
"(",
"level",
")",
"record_data",
"=",
"dict",
"(",
"name",
"=",
"category",
",",
"levelname",
"=",
"levelname",
",",
"msg",
"=",
"message",
")",
"record_data",
".",
"update",
"(",
"kwargs",
")",
"record",
"=",
"logging",
".",
"makeLogRecord",
"(",
"record_data",
")",
"formatter",
"=",
"logging",
".",
"Formatter",
"(",
"format",
",",
"datefmt",
"=",
"datefmt",
")",
"return",
"formatter",
".",
"format",
"(",
"record",
")"
] | python | Create the output for a log record, as performed by the :mod:`logging` module.
:param category: Name of the logger (as string or None).
:param level: Log level (as number).
:param message: Log message to use.
:returns: Log record output (as string) | true |
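A direct call sketch; the format string is an ordinary logging format:

import logging

line = make_log_record_output("foo.bar", logging.WARNING, "disk almost full",
                              format="%(levelname)s:%(name)s:%(message)s")
print(line)  # -> "WARNING:foo.bar:disk almost full"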
2,470,861 | def step_I_create_logrecords_with_table(context):
"""
    Step definition that creates one or more log records by using a table.
.. code-block: gherkin
When I create log records with:
| category | level | message |
| foo | ERROR | Hello Foo |
| foo.bar | WARN | Hello Foo.Bar |
Table description
------------------
| Column | Type | Required | Description |
| category | string | yes | Category (or logger) to use. |
| level | LogLevel | yes | Log level to use. |
| message | string | yes | Log message to use. |
.. code-block: python
import logging
from behave.configuration import LogLevel
for row in table.rows:
logger = logging.getLogger(row.category)
level = LogLevel.parse_type(row.level)
logger.log(level, row.message)
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
for row in context.table.rows:
category = row["category"]
if category == "__ROOT__":
category = None
level = LogLevel.parse_type(row["level"])
message = row["message"]
make_log_record(category, level, message) | [
"def",
"step_I_create_logrecords_with_table",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"category\"",
",",
"\"level\"",
",",
"\"message\"",
"]",
")",
"for",
"row",
"in",
"context",
".",
"table",
".",
"rows",
":",
"category",
"=",
"row",
"[",
"\"category\"",
"]",
"if",
"category",
"==",
"\"__ROOT__\"",
":",
"category",
"=",
"None",
"level",
"=",
"LogLevel",
".",
"parse_type",
"(",
"row",
"[",
"\"level\"",
"]",
")",
"message",
"=",
"row",
"[",
"\"message\"",
"]",
"make_log_record",
"(",
"category",
",",
"level",
",",
"message",
")"
] | python | Step definition that creates one or more log records by using a table.
.. code-block: gherkin
When I create log records with:
| category | level | message |
| foo | ERROR | Hello Foo |
| foo.bar | WARN | Hello Foo.Bar |
Table description
------------------
| Column | Type | Required | Description |
| category | string | yes | Category (or logger) to use. |
| level | LogLevel | yes | Log level to use. |
| message | string | yes | Log message to use. |
.. code-block: python
import logging
from behave.configuration import LogLevel
for row in table.rows:
logger = logging.getLogger(row.category)
level = LogLevel.parse_type(row.level)
logger.log(level, row.message) | true |
2,470,862 | def step_I_create_logrecord_with_table(context):
"""
    Create a log record by using a table to provide the parts.
.. seealso: :func:`step_I_create_logrecords_with_table()`
"""
assert context.table, "REQUIRE: context.table"
assert len(context.table.rows) == 1, "REQUIRE: table.row.size == 1"
step_I_create_logrecords_with_table(context) | [
"def",
"step_I_create_logrecord_with_table",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"assert",
"len",
"(",
"context",
".",
"table",
".",
"rows",
")",
"==",
"1",
",",
"\"REQUIRE: table.row.size == 1\"",
"step_I_create_logrecords_with_table",
"(",
"context",
")"
] | python | Create a log record by using a table to provide the parts.
.. seealso: :func:`step_I_create_logrecords_with_table()` | true |
2,470,864 | def step_command_output_should_contain_log_records(context):
"""
Verifies that the command output contains the specified log records
(in any order).
.. code-block: gherkin
Then the command output should contain the following log records:
| category | level | message |
| bar | CURRENT | xxx |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
format = getattr(context, "log_record_format", context.config.logging_format)
for row in context.table.rows:
output = LogRecordTable.make_output_for_row(row, format)
context.execute_steps(u'''
Then the command output should contain:
"""
{expected_output}
"""
'''.format(expected_output=output)) | [
"def",
"step_command_output_should_contain_log_records",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"category\"",
",",
"\"level\"",
",",
"\"message\"",
"]",
")",
"format",
"=",
"getattr",
"(",
"context",
",",
"\"log_record_format\"",
",",
"context",
".",
"config",
".",
"logging_format",
")",
"for",
"row",
"in",
"context",
".",
"table",
".",
"rows",
":",
"output",
"=",
"LogRecordTable",
".",
"make_output_for_row",
"(",
"row",
",",
"format",
")",
"context",
".",
"execute_steps",
"(",
"u'''\n Then the command output should contain:\n \"\"\"\n {expected_output}\n \"\"\"\n '''",
".",
"format",
"(",
"expected_output",
"=",
"output",
")",
")"
] | python | Verifies that the command output contains the specified log records
(in any order).
.. code-block: gherkin
Then the command output should contain the following log records:
| category | level | message |
| bar | CURRENT | xxx | | true |
2,470,865 | def step_command_output_should_not_contain_log_records(context):
"""
    Verifies that the command output does not contain the specified log records
    (in any order).
    .. code-block: gherkin
        Then the command output should not contain the following log records:
| category | level | message |
| bar | CURRENT | xxx |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
format = getattr(context, "log_record_format", context.config.logging_format)
for row in context.table.rows:
output = LogRecordTable.make_output_for_row(row, format)
context.execute_steps(u'''
Then the command output should not contain:
"""
{expected_output}
"""
'''.format(expected_output=output)) | [
"def",
"step_command_output_should_not_contain_log_records",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"category\"",
",",
"\"level\"",
",",
"\"message\"",
"]",
")",
"format",
"=",
"getattr",
"(",
"context",
",",
"\"log_record_format\"",
",",
"context",
".",
"config",
".",
"logging_format",
")",
"for",
"row",
"in",
"context",
".",
"table",
".",
"rows",
":",
"output",
"=",
"LogRecordTable",
".",
"make_output_for_row",
"(",
"row",
",",
"format",
")",
"context",
".",
"execute_steps",
"(",
"u'''\n Then the command output should not contain:\n \"\"\"\n {expected_output}\n \"\"\"\n '''",
".",
"format",
"(",
"expected_output",
"=",
"output",
")",
")"
] | python | Verifies that the command output does not contain the specified log records
    (in any order).
    .. code-block: gherkin
        Then the command output should not contain the following log records:
| category | level | message |
| bar | CURRENT | xxx | | true |
2,470,868 | def step_command_output_should_contain_log_records_from_categories(context):
"""
Verifies that the command output contains the specified log records
(in any order).
.. code-block: gherkin
Given I define a log record schema:
| category | level | message |
| root | ERROR | __LOG_MESSAGE__ |
Then the command output should contain log records from categories:
| category |
| bar |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_column("category")
record_schema = context.log_record_row_schema
LogRecordTable.annotate_with_row_schema(context.table, record_schema)
step_command_output_should_contain_log_records(context)
context.table.remove_columns(["level", "message"]) | [
"def",
"step_command_output_should_contain_log_records_from_categories",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_column",
"(",
"\"category\"",
")",
"record_schema",
"=",
"context",
".",
"log_record_row_schema",
"LogRecordTable",
".",
"annotate_with_row_schema",
"(",
"context",
".",
"table",
",",
"record_schema",
")",
"step_command_output_should_contain_log_records",
"(",
"context",
")",
"context",
".",
"table",
".",
"remove_columns",
"(",
"[",
"\"level\"",
",",
"\"message\"",
"]",
")"
] | python | Verifies that the command output contains the specified log records
(in any order).
.. code-block: gherkin
Given I define a log record schema:
| category | level | message |
| root | ERROR | __LOG_MESSAGE__ |
Then the command output should contain log records from categories:
| category |
| bar | | true |
2,470,869 | def step_command_output_should_not_contain_log_records_from_categories(context):
"""
    Verifies that the command output does not contain log records from
    the provided log categories (in any order).
.. code-block: gherkin
Given I define the log record schema:
| category | level | message |
| root | ERROR | __LOG_MESSAGE__ |
Then the command output should not contain log records from categories:
| category |
| bar |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_column("category")
record_schema = context.log_record_row_schema
LogRecordTable.annotate_with_row_schema(context.table, record_schema)
step_command_output_should_not_contain_log_records(context)
context.table.remove_columns(["level", "message"]) | [
"def",
"step_command_output_should_not_contain_log_records_from_categories",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_column",
"(",
"\"category\"",
")",
"record_schema",
"=",
"context",
".",
"log_record_row_schema",
"LogRecordTable",
".",
"annotate_with_row_schema",
"(",
"context",
".",
"table",
",",
"record_schema",
")",
"step_command_output_should_not_contain_log_records",
"(",
"context",
")",
"context",
".",
"table",
".",
"remove_columns",
"(",
"[",
"\"level\"",
",",
"\"message\"",
"]",
")"
] | python | Verifies that the command output does not contain log records from
    the provided log categories (in any order).
.. code-block: gherkin
Given I define the log record schema:
| category | level | message |
| root | ERROR | __LOG_MESSAGE__ |
Then the command output should not contain log records from categories:
| category |
| bar | | true |
2,470,870 | def step_file_should_contain_log_records(context, filename):
"""
Verifies that the command output contains the specified log records
(in any order).
.. code-block: gherkin
Then the file "xxx.log" should contain the log records:
| category | level | message |
| bar | CURRENT | xxx |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
format = getattr(context, "log_record_format", context.config.logging_format)
for row in context.table.rows:
output = LogRecordTable.make_output_for_row(row, format)
context.text = output
step_file_should_contain_multiline_text(context, filename) | [
"def",
"step_file_should_contain_log_records",
"(",
"context",
",",
"filename",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"category\"",
",",
"\"level\"",
",",
"\"message\"",
"]",
")",
"format",
"=",
"getattr",
"(",
"context",
",",
"\"log_record_format\"",
",",
"context",
".",
"config",
".",
"logging_format",
")",
"for",
"row",
"in",
"context",
".",
"table",
".",
"rows",
":",
"output",
"=",
"LogRecordTable",
".",
"make_output_for_row",
"(",
"row",
",",
"format",
")",
"context",
".",
"text",
"=",
"output",
"step_file_should_contain_multiline_text",
"(",
"context",
",",
"filename",
")"
] | python | Verifies that the command output contains the specified log records
(in any order).
.. code-block: gherkin
Then the file "xxx.log" should contain the log records:
| category | level | message |
| bar | CURRENT | xxx | | true |