id_within_dataset (int64, 1–55.5k) | snippet (stringlengths 19–14.2k) | tokens (sequencelengths 6–1.63k) | nl (stringlengths 6–352) | split_within_dataset (stringclasses, 1 value) | is_duplicated (bool, 2 classes)
---|---|---|---|---|---|
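Each row below pairs a Python source snippet with its lexer token sequence (`tokens`), a one-line natural-language summary (`nl`), its split, and a duplicate flag. A minimal sketch of iterating such rows, assuming the dump has been exported as JSON Lines with the column names above (the filename and export format are assumptions, not part of this dump):

```python
import json

# Hypothetical filename; adjust to wherever the dump actually lives.
with open('code_nl_pairs.jsonl', encoding='utf-8') as fh:
    for raw in fh:
        row = json.loads(raw)
        snippet = row['snippet']      # raw source code
        tokens = row['tokens']        # lexer tokens of the snippet
        summary = row['nl']           # natural-language description
        if not row['is_duplicated']:  # skip flagged near-duplicates
            print(len(tokens), summary)
```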
133 | def load_default():
    return _module_to_dict(defaultconfig)
| [
"def",
"load_default",
"(",
")",
":",
"return",
"_module_to_dict",
"(",
"defaultconfig",
")"
] | load a "better than nothing" default font . | train | false |
134 | def make_auth_headers():
    if (not os.path.exists('.appveyor.token')):
        raise RuntimeError('Please create a file named `.appveyor.token` in the current directory. You can get the token from https://ci.appveyor.com/api-token')
    with open('.appveyor.token') as f:
        token = f.read().strip()
    headers = {'Authorization': 'Bearer {}'.format(token)}
    return headers
| [
"def",
"make_auth_headers",
"(",
")",
":",
"if",
"(",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"'.appveyor.token'",
")",
")",
":",
"raise",
"RuntimeError",
"(",
"'Please create a file named `.appveyor.token` in the current directory. You can get the token from https://ci.appveyor.com/api-token'",
")",
"with",
"open",
"(",
"'.appveyor.token'",
")",
"as",
"f",
":",
"token",
"=",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Bearer {}'",
".",
"format",
"(",
"token",
")",
"}",
"return",
"headers"
] | make the authentication headers needed to use the appveyor api . | train | true |
135 | def _compute_neighbors(image, structure, offset):
    structure[tuple(offset)] = 0
    locations = np.transpose(np.nonzero(structure))
    sqdistances = np.sum(((locations - offset) ** 2), axis=1)
    neighborhood = (np.ravel_multi_index(locations.T, image.shape) - np.ravel_multi_index(offset, image.shape)).astype(np.int32)
    sorted_neighborhood = neighborhood[np.argsort(sqdistances)]
    return sorted_neighborhood
| [
"def",
"_compute_neighbors",
"(",
"image",
",",
"structure",
",",
"offset",
")",
":",
"structure",
"[",
"tuple",
"(",
"offset",
")",
"]",
"=",
"0",
"locations",
"=",
"np",
".",
"transpose",
"(",
"np",
".",
"nonzero",
"(",
"structure",
")",
")",
"sqdistances",
"=",
"np",
".",
"sum",
"(",
"(",
"(",
"locations",
"-",
"offset",
")",
"**",
"2",
")",
",",
"axis",
"=",
"1",
")",
"neighborhood",
"=",
"(",
"np",
".",
"ravel_multi_index",
"(",
"locations",
".",
"T",
",",
"image",
".",
"shape",
")",
"-",
"np",
".",
"ravel_multi_index",
"(",
"offset",
",",
"image",
".",
"shape",
")",
")",
".",
"astype",
"(",
"np",
".",
"int32",
")",
"sorted_neighborhood",
"=",
"neighborhood",
"[",
"np",
".",
"argsort",
"(",
"sqdistances",
")",
"]",
"return",
"sorted_neighborhood"
] | compute neighborhood as an array of linear offsets into the image . | train | false |
136 | @RegisterWithArgChecks(name='neighbor.get', req_args=[neighbors.IP_ADDRESS])
def get_neighbor_conf(neigh_ip_address):
    neigh_conf = _get_neighbor_conf(neigh_ip_address)
    return neigh_conf.settings
| [
"@",
"RegisterWithArgChecks",
"(",
"name",
"=",
"'neighbor.get'",
",",
"req_args",
"=",
"[",
"neighbors",
".",
"IP_ADDRESS",
"]",
")",
"def",
"get_neighbor_conf",
"(",
"neigh_ip_address",
")",
":",
"neigh_conf",
"=",
"_get_neighbor_conf",
"(",
"neigh_ip_address",
")",
"return",
"neigh_conf",
".",
"settings"
] | returns a neighbor configuration for given ip address if exists . | train | false |
137 | @receiver(send_user_notification, sender=CourseCreator)
def send_user_notification_callback(sender, **kwargs):
    user = kwargs['user']
    updated_state = kwargs['state']
    studio_request_email = settings.FEATURES.get('STUDIO_REQUEST_EMAIL', '')
    context = {'studio_request_email': studio_request_email}
    subject = render_to_string('emails/course_creator_subject.txt', context)
    subject = ''.join(subject.splitlines())
    if (updated_state == CourseCreator.GRANTED):
        message_template = 'emails/course_creator_granted.txt'
    elif (updated_state == CourseCreator.DENIED):
        message_template = 'emails/course_creator_denied.txt'
    else:
        message_template = 'emails/course_creator_revoked.txt'
    message = render_to_string(message_template, context)
    try:
        user.email_user(subject, message, studio_request_email)
    except:
        log.warning('Unable to send course creator status e-mail to %s', user.email)
| [
"@",
"receiver",
"(",
"send_user_notification",
",",
"sender",
"=",
"CourseCreator",
")",
"def",
"send_user_notification_callback",
"(",
"sender",
",",
"**",
"kwargs",
")",
":",
"user",
"=",
"kwargs",
"[",
"'user'",
"]",
"updated_state",
"=",
"kwargs",
"[",
"'state'",
"]",
"studio_request_email",
"=",
"settings",
".",
"FEATURES",
".",
"get",
"(",
"'STUDIO_REQUEST_EMAIL'",
",",
"''",
")",
"context",
"=",
"{",
"'studio_request_email'",
":",
"studio_request_email",
"}",
"subject",
"=",
"render_to_string",
"(",
"'emails/course_creator_subject.txt'",
",",
"context",
")",
"subject",
"=",
"''",
".",
"join",
"(",
"subject",
".",
"splitlines",
"(",
")",
")",
"if",
"(",
"updated_state",
"==",
"CourseCreator",
".",
"GRANTED",
")",
":",
"message_template",
"=",
"'emails/course_creator_granted.txt'",
"elif",
"(",
"updated_state",
"==",
"CourseCreator",
".",
"DENIED",
")",
":",
"message_template",
"=",
"'emails/course_creator_denied.txt'",
"else",
":",
"message_template",
"=",
"'emails/course_creator_revoked.txt'",
"message",
"=",
"render_to_string",
"(",
"message_template",
",",
"context",
")",
"try",
":",
"user",
".",
"email_user",
"(",
"subject",
",",
"message",
",",
"studio_request_email",
")",
"except",
":",
"log",
".",
"warning",
"(",
"'Unable to send course creator status e-mail to %s'",
",",
"user",
".",
"email",
")"
] | callback for notifying user about course creator status change . | train | false |
139 | def _validate_nrows(nrows):
msg = "'nrows' must be an integer"
if (nrows is not None):
if is_float(nrows):
if (int(nrows) != nrows):
raise ValueError(msg)
nrows = int(nrows)
elif (not is_integer(nrows)):
raise ValueError(msg)
return nrows
| [
"def",
"_validate_nrows",
"(",
"nrows",
")",
":",
"msg",
"=",
"\"'nrows' must be an integer\"",
"if",
"(",
"nrows",
"is",
"not",
"None",
")",
":",
"if",
"is_float",
"(",
"nrows",
")",
":",
"if",
"(",
"int",
"(",
"nrows",
")",
"!=",
"nrows",
")",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"nrows",
"=",
"int",
"(",
"nrows",
")",
"elif",
"(",
"not",
"is_integer",
"(",
"nrows",
")",
")",
":",
"raise",
"ValueError",
"(",
"msg",
")",
"return",
"nrows"
] | checks whether the nrows parameter for parsing is either an integer or float that can safely be cast to an integer without losing accuracy . | train | false |
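A usage sketch for `_validate_nrows`, assuming `is_float` and `is_integer` are pandas' scalar-inference helpers (re-exported as `pandas.api.types.is_float` / `is_integer`; an assumption, the snippet's own imports are not shown):

```python
from pandas.api.types import is_float, is_integer  # assumed to match the snippet's helpers

print(_validate_nrows(5.0))   # 5    (a float with an integral value is cast)
print(_validate_nrows(None))  # None (passes through)
# _validate_nrows(5.5) raises ValueError: 'nrows' must be an integer
```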
140 | def test_possible_string_format_functions():
    t = QTable([([1, 2] * u.m)])
    t['col0'].info.format = '%.3f'
    assert (t.pformat() == [' col0', ' m ', '-----', '1.000', '2.000'])
    t['col0'].info.format = 'hi {:.3f}'
    assert (t.pformat() == [' col0 ', ' m ', '--------', 'hi 1.000', 'hi 2.000'])
    t['col0'].info.format = '.4f'
    assert (t.pformat() == [' col0 ', ' m ', '------', '1.0000', '2.0000'])
| [
"def",
"test_possible_string_format_functions",
"(",
")",
":",
"t",
"=",
"QTable",
"(",
"[",
"(",
"[",
"1",
",",
"2",
"]",
"*",
"u",
".",
"m",
")",
"]",
")",
"t",
"[",
"'col0'",
"]",
".",
"info",
".",
"format",
"=",
"'%.3f'",
"assert",
"(",
"t",
".",
"pformat",
"(",
")",
"==",
"[",
"' col0'",
",",
"' m '",
",",
"'-----'",
",",
"'1.000'",
",",
"'2.000'",
"]",
")",
"t",
"[",
"'col0'",
"]",
".",
"info",
".",
"format",
"=",
"'hi {:.3f}'",
"assert",
"(",
"t",
".",
"pformat",
"(",
")",
"==",
"[",
"' col0 '",
",",
"' m '",
",",
"'--------'",
",",
"'hi 1.000'",
",",
"'hi 2.000'",
"]",
")",
"t",
"[",
"'col0'",
"]",
".",
"info",
".",
"format",
"=",
"'.4f'",
"assert",
"(",
"t",
".",
"pformat",
"(",
")",
"==",
"[",
"' col0 '",
",",
"' m '",
",",
"'------'",
",",
"'1.0000'",
",",
"'2.0000'",
"]",
")"
] | the quantityinfo info class for quantity implements a possible_string_format_functions() method that overrides the standard pprint . | train | false |
141 | def add_status_query_managers(sender, **kwargs):
    if (not issubclass(sender, StatusModel)):
        return
    for (value, display) in getattr(sender, u'STATUS', ()):
        try:
            sender._meta.get_field(value)
            raise ImproperlyConfigured((u"StatusModel: Model '%s' has a field named '%s' which conflicts with a status of the same name." % (sender.__name__, value)))
        except FieldDoesNotExist:
            pass
        sender.add_to_class(value, QueryManager(status=value))
| [
"def",
"add_status_query_managers",
"(",
"sender",
",",
"**",
"kwargs",
")",
":",
"if",
"(",
"not",
"issubclass",
"(",
"sender",
",",
"StatusModel",
")",
")",
":",
"return",
"for",
"(",
"value",
",",
"display",
")",
"in",
"getattr",
"(",
"sender",
",",
"u'STATUS'",
",",
"(",
")",
")",
":",
"try",
":",
"sender",
".",
"_meta",
".",
"get_field",
"(",
"value",
")",
"raise",
"ImproperlyConfigured",
"(",
"(",
"u\"StatusModel: Model '%s' has a field named '%s' which conflicts with a status of the same name.\"",
"%",
"(",
"sender",
".",
"__name__",
",",
"value",
")",
")",
")",
"except",
"FieldDoesNotExist",
":",
"pass",
"sender",
".",
"add_to_class",
"(",
"value",
",",
"QueryManager",
"(",
"status",
"=",
"value",
")",
")"
] | add a querymanager for each status item dynamically . | train | false |
142 | def call_url(url, view_kwargs=None):
    (func_name, func_data) = app.url_map.bind('').match(url)
    if (view_kwargs is not None):
        func_data.update(view_kwargs)
    view_function = view_functions[func_name]
    rv = view_function(**func_data)
    (rv, _, _, _) = unpack(rv)
    if (isinstance(rv, werkzeug.wrappers.BaseResponse) and (rv.status_code in REDIRECT_CODES)):
        redirect_url = rv.headers['Location']
        return call_url(redirect_url)
    return rv
| [
"def",
"call_url",
"(",
"url",
",",
"view_kwargs",
"=",
"None",
")",
":",
"(",
"func_name",
",",
"func_data",
")",
"=",
"app",
".",
"url_map",
".",
"bind",
"(",
"''",
")",
".",
"match",
"(",
"url",
")",
"if",
"(",
"view_kwargs",
"is",
"not",
"None",
")",
":",
"func_data",
".",
"update",
"(",
"view_kwargs",
")",
"view_function",
"=",
"view_functions",
"[",
"func_name",
"]",
"rv",
"=",
"view_function",
"(",
"**",
"func_data",
")",
"(",
"rv",
",",
"_",
",",
"_",
",",
"_",
")",
"=",
"unpack",
"(",
"rv",
")",
"if",
"(",
"isinstance",
"(",
"rv",
",",
"werkzeug",
".",
"wrappers",
".",
"BaseResponse",
")",
"and",
"(",
"rv",
".",
"status_code",
"in",
"REDIRECT_CODES",
")",
")",
":",
"redirect_url",
"=",
"rv",
".",
"headers",
"[",
"'Location'",
"]",
"return",
"call_url",
"(",
"redirect_url",
")",
"return",
"rv"
] | look up and call view function by url . | train | false |
143 | def get_subclasses(c):
    return (c.__subclasses__() + sum(map(get_subclasses, c.__subclasses__()), []))
| [
"def",
"get_subclasses",
"(",
"c",
")",
":",
"return",
"(",
"c",
".",
"__subclasses__",
"(",
")",
"+",
"sum",
"(",
"map",
"(",
"get_subclasses",
",",
"c",
".",
"__subclasses__",
"(",
")",
")",
",",
"[",
"]",
")",
")"
] | get all subclasses of a given class . | train | false |
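A usage sketch for `get_subclasses`: direct subclasses come first, then subclasses found recursively (the class names are illustrative):

```python
class A: pass
class B(A): pass
class C(A): pass
class D(B): pass

print(get_subclasses(A))  # [<class 'B'>, <class 'C'>, <class 'D'>]
```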
146 | def function_noArgs():
    return
| [
"def",
"function_noArgs",
"(",
")",
":",
"return"
] | a function which accepts no arguments at all . | train | false |
147 | def blacklist_check(path):
    (head, tests_dir) = os.path.split(path.dirname)
    if (tests_dir != u'tests'):
        return True
    (head, top_module) = os.path.split(head)
    return (path.purebasename in IGNORED_TESTS.get(top_module, []))
| [
"def",
"blacklist_check",
"(",
"path",
")",
":",
"(",
"head",
",",
"tests_dir",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"path",
".",
"dirname",
")",
"if",
"(",
"tests_dir",
"!=",
"u'tests'",
")",
":",
"return",
"True",
"(",
"head",
",",
"top_module",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"head",
")",
"return",
"(",
"path",
".",
"purebasename",
"in",
"IGNORED_TESTS",
".",
"get",
"(",
"top_module",
",",
"[",
"]",
")",
")"
] | check if test is blacklisted and should be ignored . | train | false |
148 | def get_roles_for_user(user_db):
    role_names = UserRoleAssignment.query(user=user_db.name).only('role').scalar('role')
    result = Role.query(name__in=role_names)
    return result
| [
"def",
"get_roles_for_user",
"(",
"user_db",
")",
":",
"role_names",
"=",
"UserRoleAssignment",
".",
"query",
"(",
"user",
"=",
"user_db",
".",
"name",
")",
".",
"only",
"(",
"'role'",
")",
".",
"scalar",
"(",
"'role'",
")",
"result",
"=",
"Role",
".",
"query",
"(",
"name__in",
"=",
"role_names",
")",
"return",
"result"
] | retrieve all the roles assigned to the provided user . | train | false |
149 | def getListTableElements(listTable):
    listTableElements = []
    for listTableValue in listTable.values():
        listTableElements += listTableValue
    return listTableElements
| [
"def",
"getListTableElements",
"(",
"listTable",
")",
":",
"listTableElements",
"=",
"[",
"]",
"for",
"listTableValue",
"in",
"listTable",
".",
"values",
"(",
")",
":",
"listTableElements",
"+=",
"listTableValue",
"return",
"listTableElements"
] | get all the element in a list table . | train | false |
151 | def set_cover(container, cover_path, report=None, options=None):
    report = (report or (lambda x: x))
    if (container.book_type == u'azw3'):
        set_azw3_cover(container, cover_path, report, options=options)
    else:
        set_epub_cover(container, cover_path, report, options=options)
| [
"def",
"set_cover",
"(",
"container",
",",
"cover_path",
",",
"report",
"=",
"None",
",",
"options",
"=",
"None",
")",
":",
"report",
"=",
"(",
"report",
"or",
"(",
"lambda",
"x",
":",
"x",
")",
")",
"if",
"(",
"container",
".",
"book_type",
"==",
"u'azw3'",
")",
":",
"set_azw3_cover",
"(",
"container",
",",
"cover_path",
",",
"report",
",",
"options",
"=",
"options",
")",
"else",
":",
"set_epub_cover",
"(",
"container",
",",
"cover_path",
",",
"report",
",",
"options",
"=",
"options",
")"
] | set the cover of the book to the image pointed to by cover_path . | train | false |
152 | def _strip_rst_role(type_str):
    match = REST_ROLE_PATTERN.match(type_str)
    if match:
        return match.group(1)
    else:
        return type_str
| [
"def",
"_strip_rst_role",
"(",
"type_str",
")",
":",
"match",
"=",
"REST_ROLE_PATTERN",
".",
"match",
"(",
"type_str",
")",
"if",
"match",
":",
"return",
"match",
".",
"group",
"(",
"1",
")",
"else",
":",
"return",
"type_str"
] | strip off the part looks like a rest role in type_str . | train | false |
154 | def get_vcs_root(path):
    previous_path = path
    while (get_vcs_info(path) is None):
        path = abspardir(path)
        if (path == previous_path):
            return
        else:
            previous_path = path
    return osp.abspath(path)
| [
"def",
"get_vcs_root",
"(",
"path",
")",
":",
"previous_path",
"=",
"path",
"while",
"(",
"get_vcs_info",
"(",
"path",
")",
"is",
"None",
")",
":",
"path",
"=",
"abspardir",
"(",
"path",
")",
"if",
"(",
"path",
"==",
"previous_path",
")",
":",
"return",
"else",
":",
"previous_path",
"=",
"path",
"return",
"osp",
".",
"abspath",
"(",
"path",
")"
] | return vcs root directory path return none if path is not within a supported vcs repository . | train | true |
155 | def get_ec2_driver(aws):
    ec2 = get_driver(Provider.EC2)(aws['access_key'], aws['secret_access_token'], region=aws['region'])
    return ec2
| [
"def",
"get_ec2_driver",
"(",
"aws",
")",
":",
"ec2",
"=",
"get_driver",
"(",
"Provider",
".",
"EC2",
")",
"(",
"aws",
"[",
"'access_key'",
"]",
",",
"aws",
"[",
"'secret_access_token'",
"]",
",",
"region",
"=",
"aws",
"[",
"'region'",
"]",
")",
"return",
"ec2"
] | get a libcloud ec2 driver given some credentials and other configuration . | train | false |
156 | @decorator.decorator
def outplace(f, clip, *a, **k):
    newclip = clip.copy()
    f(newclip, *a, **k)
    return newclip
| [
"@",
"decorator",
".",
"decorator",
"def",
"outplace",
"(",
"f",
",",
"clip",
",",
"*",
"a",
",",
"**",
"k",
")",
":",
"newclip",
"=",
"clip",
".",
"copy",
"(",
")",
"f",
"(",
"newclip",
",",
"*",
"a",
",",
"**",
"k",
")",
"return",
"newclip"
] | applies f(clip . | train | false |
157 | def is_prerelease(version=None):
    return (_get_candidate(get_complete_version(version)) != 'final')
| [
"def",
"is_prerelease",
"(",
"version",
"=",
"None",
")",
":",
"return",
"(",
"_get_candidate",
"(",
"get_complete_version",
"(",
"version",
")",
")",
"!=",
"'final'",
")"
] | attempt to determine if this is a pre-release using pep386/pep426 rules . | train | false |
159 | def virtual_interface_list(provider, names, **kwargs):
    client = _get_client()
    return client.extra_action(provider=provider, names=names, action='virtual_interface_list', **kwargs)
| [
"def",
"virtual_interface_list",
"(",
"provider",
",",
"names",
",",
"**",
"kwargs",
")",
":",
"client",
"=",
"_get_client",
"(",
")",
"return",
"client",
".",
"extra_action",
"(",
"provider",
"=",
"provider",
",",
"names",
"=",
"names",
",",
"action",
"=",
"'virtual_interface_list'",
",",
"**",
"kwargs",
")"
] | create private networks . | train | true |
160 | def _cache(bank, key, fun, **kwargs):
    items = cache.fetch(bank, key)
    if (items is None):
        items = {}
        try:
            item_list = fun(**kwargs)
        except CloudError as exc:
            log.warning('There was a cloud error calling {0} with kwargs {1}: {2}'.format(fun, kwargs, exc))
        for item in item_list:
            items[item.name] = object_to_dict(item)
        cache.store(bank, key, items)
    return items
| [
"def",
"_cache",
"(",
"bank",
",",
"key",
",",
"fun",
",",
"**",
"kwargs",
")",
":",
"items",
"=",
"cache",
".",
"fetch",
"(",
"bank",
",",
"key",
")",
"if",
"(",
"items",
"is",
"None",
")",
":",
"items",
"=",
"{",
"}",
"try",
":",
"item_list",
"=",
"fun",
"(",
"**",
"kwargs",
")",
"except",
"CloudError",
"as",
"exc",
":",
"log",
".",
"warning",
"(",
"'There was a cloud error calling {0} with kwargs {1}: {2}'",
".",
"format",
"(",
"fun",
",",
"kwargs",
",",
"exc",
")",
")",
"for",
"item",
"in",
"item_list",
":",
"items",
"[",
"item",
".",
"name",
"]",
"=",
"object_to_dict",
"(",
"item",
")",
"cache",
".",
"store",
"(",
"bank",
",",
"key",
",",
"items",
")",
"return",
"items"
] | cache an azure arm object . | train | false |
161 | @register(u'yank-last-arg')
def yank_last_arg(event):
    n = (event.arg if event.arg_present else None)
    event.current_buffer.yank_last_arg(n)
| [
"@",
"register",
"(",
"u'yank-last-arg'",
")",
"def",
"yank_last_arg",
"(",
"event",
")",
":",
"n",
"=",
"(",
"event",
".",
"arg",
"if",
"event",
".",
"arg_present",
"else",
"None",
")",
"event",
".",
"current_buffer",
".",
"yank_last_arg",
"(",
"n",
")"
] | like yank_nth_arg . | train | false |
163 | def p_inclusive_or_expression_2(t):
    pass
| [
"def",
"p_inclusive_or_expression_2",
"(",
"t",
")",
":",
"pass"
] | inclusive_or_expression : inclusive_or_expression or exclusive_or_expression . | train | false |
164 | def _ssh_args(ssh_bin, address, ec2_key_pair_file):
    if (ec2_key_pair_file is None):
        raise ValueError('SSH key file path is None')
    return (ssh_bin + ['-i', ec2_key_pair_file, '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null', ('hadoop@%s' % (address,))])
| [
"def",
"_ssh_args",
"(",
"ssh_bin",
",",
"address",
",",
"ec2_key_pair_file",
")",
":",
"if",
"(",
"ec2_key_pair_file",
"is",
"None",
")",
":",
"raise",
"ValueError",
"(",
"'SSH key file path is None'",
")",
"return",
"(",
"ssh_bin",
"+",
"[",
"'-i'",
",",
"ec2_key_pair_file",
",",
"'-o'",
",",
"'StrictHostKeyChecking=no'",
",",
"'-o'",
",",
"'UserKnownHostsFile=/dev/null'",
",",
"(",
"'hadoop@%s'",
"%",
"(",
"address",
",",
")",
")",
"]",
")"
] | helper method for :py:func:_ssh_run to build an argument list for subprocess . | train | false |
165 | def get_variable_values_sorted(variable):
    if variable.is_continuous:
        return []
    try:
        return sorted(variable.values, key=int)
    except ValueError:
        return variable.values
| [
"def",
"get_variable_values_sorted",
"(",
"variable",
")",
":",
"if",
"variable",
".",
"is_continuous",
":",
"return",
"[",
"]",
"try",
":",
"return",
"sorted",
"(",
"variable",
".",
"values",
",",
"key",
"=",
"int",
")",
"except",
"ValueError",
":",
"return",
"variable",
".",
"values"
] | return a list of sorted values for given attribute . | train | false |
169 | def make_twilio_request(method, uri, **kwargs):
    headers = kwargs.get('headers', {})
    user_agent = ('twilio-python/%s (Python %s)' % (__version__, platform.python_version()))
    headers['User-Agent'] = user_agent
    headers['Accept-Charset'] = 'utf-8'
    if ((method == 'POST') and ('Content-Type' not in headers)):
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
    kwargs['headers'] = headers
    if ('Accept' not in headers):
        headers['Accept'] = 'application/json'
    if kwargs.pop('use_json_extension', False):
        uri += '.json'
    resp = make_request(method, uri, **kwargs)
    if (not resp.ok):
        try:
            error = json.loads(resp.content)
            code = error['code']
            message = error['message']
        except:
            code = None
            message = resp.content
        raise TwilioRestException(status=resp.status_code, method=method, uri=resp.url, msg=message, code=code)
    return resp
| [
"def",
"make_twilio_request",
"(",
"method",
",",
"uri",
",",
"**",
"kwargs",
")",
":",
"headers",
"=",
"kwargs",
".",
"get",
"(",
"'headers'",
",",
"{",
"}",
")",
"user_agent",
"=",
"(",
"'twilio-python/%s (Python %s)'",
"%",
"(",
"__version__",
",",
"platform",
".",
"python_version",
"(",
")",
")",
")",
"headers",
"[",
"'User-Agent'",
"]",
"=",
"user_agent",
"headers",
"[",
"'Accept-Charset'",
"]",
"=",
"'utf-8'",
"if",
"(",
"(",
"method",
"==",
"'POST'",
")",
"and",
"(",
"'Content-Type'",
"not",
"in",
"headers",
")",
")",
":",
"headers",
"[",
"'Content-Type'",
"]",
"=",
"'application/x-www-form-urlencoded'",
"kwargs",
"[",
"'headers'",
"]",
"=",
"headers",
"if",
"(",
"'Accept'",
"not",
"in",
"headers",
")",
":",
"headers",
"[",
"'Accept'",
"]",
"=",
"'application/json'",
"if",
"kwargs",
".",
"pop",
"(",
"'use_json_extension'",
",",
"False",
")",
":",
"uri",
"+=",
"'.json'",
"resp",
"=",
"make_request",
"(",
"method",
",",
"uri",
",",
"**",
"kwargs",
")",
"if",
"(",
"not",
"resp",
".",
"ok",
")",
":",
"try",
":",
"error",
"=",
"json",
".",
"loads",
"(",
"resp",
".",
"content",
")",
"code",
"=",
"error",
"[",
"'code'",
"]",
"message",
"=",
"error",
"[",
"'message'",
"]",
"except",
":",
"code",
"=",
"None",
"message",
"=",
"resp",
".",
"content",
"raise",
"TwilioRestException",
"(",
"status",
"=",
"resp",
".",
"status_code",
",",
"method",
"=",
"method",
",",
"uri",
"=",
"resp",
".",
"url",
",",
"msg",
"=",
"message",
",",
"code",
"=",
"code",
")",
"return",
"resp"
] | make a request to twilio . | train | false |
170 | def voronoi_cells(G, center_nodes, weight='weight'):
    paths = nx.multi_source_dijkstra_path(G, center_nodes, weight=weight)
    nearest = {v: p[0] for (v, p) in paths.items()}
    cells = groups(nearest)
    unreachable = (set(G) - set(nearest))
    if unreachable:
        cells['unreachable'] = unreachable
    return cells
| [
"def",
"voronoi_cells",
"(",
"G",
",",
"center_nodes",
",",
"weight",
"=",
"'weight'",
")",
":",
"paths",
"=",
"nx",
".",
"multi_source_dijkstra_path",
"(",
"G",
",",
"center_nodes",
",",
"weight",
"=",
"weight",
")",
"nearest",
"=",
"{",
"v",
":",
"p",
"[",
"0",
"]",
"for",
"(",
"v",
",",
"p",
")",
"in",
"paths",
".",
"items",
"(",
")",
"}",
"cells",
"=",
"groups",
"(",
"nearest",
")",
"unreachable",
"=",
"(",
"set",
"(",
"G",
")",
"-",
"set",
"(",
"nearest",
")",
")",
"if",
"unreachable",
":",
"cells",
"[",
"'unreachable'",
"]",
"=",
"unreachable",
"return",
"cells"
] | returns the voronoi cells centered at center_nodes with respect to the shortest-path distance metric . | train | false |
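A usage sketch for `voronoi_cells` on a small path graph; it assumes `nx` is NetworkX and `groups` is `networkx.utils.groups`, a hedged guess from the call sites:

```python
import networkx as nx
from networkx.utils import groups  # assumed helper binding

G = nx.path_graph(6)             # 0-1-2-3-4-5
print(voronoi_cells(G, {0, 5}))  # {0: {0, 1, 2}, 5: {3, 4, 5}}
```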
171 | def sendMsg(oscAddress, dataArray=[], ipAddr='127.0.0.1', port=9000, typehint=None):
    with oscLock:
        outSocket.sendto(createBinaryMsg(oscAddress, dataArray, typehint), (ipAddr, port))
| [
"def",
"sendMsg",
"(",
"oscAddress",
",",
"dataArray",
"=",
"[",
"]",
",",
"ipAddr",
"=",
"'127.0.0.1'",
",",
"port",
"=",
"9000",
",",
"typehint",
"=",
"None",
")",
":",
"with",
"oscLock",
":",
"outSocket",
".",
"sendto",
"(",
"createBinaryMsg",
"(",
"oscAddress",
",",
"dataArray",
",",
"typehint",
")",
",",
"(",
"ipAddr",
",",
"port",
")",
")"
] | create and send normal osc msgs defaults to 127 . | train | false |
173 | def fromstr(string, **kwargs):
    return GEOSGeometry(string, **kwargs)
| [
"def",
"fromstr",
"(",
"string",
",",
"**",
"kwargs",
")",
":",
"return",
"GEOSGeometry",
"(",
"string",
",",
"**",
"kwargs",
")"
] | given a string value . | train | false |
174 | def write_corpus_as_vw(corpus, filename):
    LOG.debug(u'Writing corpus to: %s', filename)
    corpus_size = 0
    with utils.smart_open(filename, u'wb') as corpus_file:
        for line in corpus_to_vw(corpus):
            corpus_file.write((line.encode(u'utf-8') + '\n'))
            corpus_size += 1
    return corpus_size
| [
"def",
"write_corpus_as_vw",
"(",
"corpus",
",",
"filename",
")",
":",
"LOG",
".",
"debug",
"(",
"u'Writing corpus to: %s'",
",",
"filename",
")",
"corpus_size",
"=",
"0",
"with",
"utils",
".",
"smart_open",
"(",
"filename",
",",
"u'wb'",
")",
"as",
"corpus_file",
":",
"for",
"line",
"in",
"corpus_to_vw",
"(",
"corpus",
")",
":",
"corpus_file",
".",
"write",
"(",
"(",
"line",
".",
"encode",
"(",
"u'utf-8'",
")",
"+",
"'\\n'",
")",
")",
"corpus_size",
"+=",
"1",
"return",
"corpus_size"
] | iterate over corpus . | train | false |
175 | def font_priority(font):
    style_normal = (font[u'font-style'] == u'normal')
    width_normal = (font[u'font-stretch'] == u'normal')
    weight_normal = (font[u'font-weight'] == u'normal')
    num_normal = sum(filter(None, (style_normal, width_normal, weight_normal)))
    subfamily_name = (font[u'wws_subfamily_name'] or font[u'preferred_subfamily_name'] or font[u'subfamily_name'])
    if ((num_normal == 3) and (subfamily_name == u'Regular')):
        return 0
    if (num_normal == 3):
        return 1
    if (subfamily_name == u'Regular'):
        return 2
    return (3 + (3 - num_normal))
| [
"def",
"font_priority",
"(",
"font",
")",
":",
"style_normal",
"=",
"(",
"font",
"[",
"u'font-style'",
"]",
"==",
"u'normal'",
")",
"width_normal",
"=",
"(",
"font",
"[",
"u'font-stretch'",
"]",
"==",
"u'normal'",
")",
"weight_normal",
"=",
"(",
"font",
"[",
"u'font-weight'",
"]",
"==",
"u'normal'",
")",
"num_normal",
"=",
"sum",
"(",
"filter",
"(",
"None",
",",
"(",
"style_normal",
",",
"width_normal",
",",
"weight_normal",
")",
")",
")",
"subfamily_name",
"=",
"(",
"font",
"[",
"u'wws_subfamily_name'",
"]",
"or",
"font",
"[",
"u'preferred_subfamily_name'",
"]",
"or",
"font",
"[",
"u'subfamily_name'",
"]",
")",
"if",
"(",
"(",
"num_normal",
"==",
"3",
")",
"and",
"(",
"subfamily_name",
"==",
"u'Regular'",
")",
")",
":",
"return",
"0",
"if",
"(",
"num_normal",
"==",
"3",
")",
":",
"return",
"1",
"if",
"(",
"subfamily_name",
"==",
"u'Regular'",
")",
":",
"return",
"2",
"return",
"(",
"3",
"+",
"(",
"3",
"-",
"num_normal",
")",
")"
] | try to ensure that the "regular" face is the first font for a given family . | train | false |
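A usage sketch for `font_priority`: it is a sort key, so the all-normal "Regular" face sorts first (the dicts below carry just the keys the function reads):

```python
fonts = [
    {'font-style': 'italic', 'font-stretch': 'normal', 'font-weight': 'normal',
     'wws_subfamily_name': None, 'preferred_subfamily_name': None,
     'subfamily_name': 'Italic'},   # priority 4
    {'font-style': 'normal', 'font-stretch': 'normal', 'font-weight': 'normal',
     'wws_subfamily_name': None, 'preferred_subfamily_name': None,
     'subfamily_name': 'Regular'},  # priority 0
]
fonts.sort(key=font_priority)
print(fonts[0]['subfamily_name'])  # Regular
```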
176 | @utils.arg('--tenant', metavar='<tenant-id>', required=True, help=_('ID of tenant to delete quota for.'))
@utils.arg('--user', metavar='<user-id>', help=_('ID of user to delete quota for.'))
def do_quota_delete(cs, args):
    cs.quotas.delete(args.tenant, user_id=args.user)
| [
"@",
"utils",
".",
"arg",
"(",
"'--tenant'",
",",
"metavar",
"=",
"'<tenant-id>'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"_",
"(",
"'ID of tenant to delete quota for.'",
")",
")",
"@",
"utils",
".",
"arg",
"(",
"'--user'",
",",
"metavar",
"=",
"'<user-id>'",
",",
"help",
"=",
"_",
"(",
"'ID of user to delete quota for.'",
")",
")",
"def",
"do_quota_delete",
"(",
"cs",
",",
"args",
")",
":",
"cs",
".",
"quotas",
".",
"delete",
"(",
"args",
".",
"tenant",
",",
"user_id",
"=",
"args",
".",
"user",
")"
] | delete quota for a tenant/user so their quota will revert back to default . | train | false |
177 | def parse_cost_limit(source):
    cost_pos = source.pos
    digits = parse_count(source)
    try:
        return int(digits)
    except ValueError:
        pass
    raise error('bad fuzzy cost limit', source.string, cost_pos)
| [
"def",
"parse_cost_limit",
"(",
"source",
")",
":",
"cost_pos",
"=",
"source",
".",
"pos",
"digits",
"=",
"parse_count",
"(",
"source",
")",
"try",
":",
"return",
"int",
"(",
"digits",
")",
"except",
"ValueError",
":",
"pass",
"raise",
"error",
"(",
"'bad fuzzy cost limit'",
",",
"source",
".",
"string",
",",
"cost_pos",
")"
] | parses a cost limit . | train | false |
179 | def test_hermite_kochanek_bartels(Chart, datas):
    chart = Chart(interpolate='hermite', interpolation_parameters={'type': 'kochanek_bartels', 'b': (-1), 'c': 1, 't': 1})
    chart = make_data(chart, datas)
    assert chart.render()
    chart = Chart(interpolate='hermite', interpolation_parameters={'type': 'kochanek_bartels', 'b': (-1), 'c': (-8), 't': 0})
    chart = make_data(chart, datas)
    assert chart.render()
    chart = Chart(interpolate='hermite', interpolation_parameters={'type': 'kochanek_bartels', 'b': 0, 'c': 10, 't': (-1)})
    chart = make_data(chart, datas)
    assert chart.render()
| [
"def",
"test_hermite_kochanek_bartels",
"(",
"Chart",
",",
"datas",
")",
":",
"chart",
"=",
"Chart",
"(",
"interpolate",
"=",
"'hermite'",
",",
"interpolation_parameters",
"=",
"{",
"'type'",
":",
"'kochanek_bartels'",
",",
"'b'",
":",
"(",
"-",
"1",
")",
",",
"'c'",
":",
"1",
",",
"'t'",
":",
"1",
"}",
")",
"chart",
"=",
"make_data",
"(",
"chart",
",",
"datas",
")",
"assert",
"chart",
".",
"render",
"(",
")",
"chart",
"=",
"Chart",
"(",
"interpolate",
"=",
"'hermite'",
",",
"interpolation_parameters",
"=",
"{",
"'type'",
":",
"'kochanek_bartels'",
",",
"'b'",
":",
"(",
"-",
"1",
")",
",",
"'c'",
":",
"(",
"-",
"8",
")",
",",
"'t'",
":",
"0",
"}",
")",
"chart",
"=",
"make_data",
"(",
"chart",
",",
"datas",
")",
"assert",
"chart",
".",
"render",
"(",
")",
"chart",
"=",
"Chart",
"(",
"interpolate",
"=",
"'hermite'",
",",
"interpolation_parameters",
"=",
"{",
"'type'",
":",
"'kochanek_bartels'",
",",
"'b'",
":",
"0",
",",
"'c'",
":",
"10",
",",
"'t'",
":",
"(",
"-",
"1",
")",
"}",
")",
"chart",
"=",
"make_data",
"(",
"chart",
",",
"datas",
")",
"assert",
"chart",
".",
"render",
"(",
")"
] | test hermite kochanek bartels interpolation . | train | false |
180 | def getIPx(domain):
    try:
        return socket.gethostbyname_ex(domain)[2]
    except Exception:
        return False
| [
"def",
"getIPx",
"(",
"domain",
")",
":",
"try",
":",
"return",
"socket",
".",
"gethostbyname_ex",
"(",
"domain",
")",
"[",
"2",
"]",
"except",
"Exception",
":",
"return",
"False"
] | this method returns an array containing one or more ip address strings that respond as the given domain name . | train | false |
181 | def find_variables(fstruct, fs_class=u'default'):
    if (fs_class == u'default'):
        fs_class = _default_fs_class(fstruct)
    return _variables(fstruct, set(), fs_class, set())
| [
"def",
"find_variables",
"(",
"fstruct",
",",
"fs_class",
"=",
"u'default'",
")",
":",
"if",
"(",
"fs_class",
"==",
"u'default'",
")",
":",
"fs_class",
"=",
"_default_fs_class",
"(",
"fstruct",
")",
"return",
"_variables",
"(",
"fstruct",
",",
"set",
"(",
")",
",",
"fs_class",
",",
"set",
"(",
")",
")"
] | finds all substitutable variables . | train | false |
182 | def getElementsByLocalName(childNodes, localName):
    elementsByLocalName = getChildElementsByLocalName(childNodes, localName)
    for childNode in childNodes:
        if (childNode.getNodeType() == 1):
            elementsByLocalName += childNode.getElementsByLocalName(localName)
    return elementsByLocalName
| [
"def",
"getElementsByLocalName",
"(",
"childNodes",
",",
"localName",
")",
":",
"elementsByLocalName",
"=",
"getChildElementsByLocalName",
"(",
"childNodes",
",",
"localName",
")",
"for",
"childNode",
"in",
"childNodes",
":",
"if",
"(",
"childNode",
".",
"getNodeType",
"(",
")",
"==",
"1",
")",
":",
"elementsByLocalName",
"+=",
"childNode",
".",
"getElementsByLocalName",
"(",
"localName",
")",
"return",
"elementsByLocalName"
] | get the descendents which have the given local name . | train | false |
183 | def loadIcon(stock_item_id):
    stock_item = getattr(gtk, stock_item_id)
    local_icon = os.path.join(GUI_DATA_PATH, 'icons', '16', ('%s.png' % stock_item))
    if os.path.exists(local_icon):
        im = gtk.Image()
        im.set_from_file(local_icon)
        im.show()
        return im.get_pixbuf()
    else:
        icon_theme = gtk.IconTheme()
        try:
            icon = icon_theme.load_icon(stock_item, 16, ())
        except:
            icon = loadImage('missing-image.png').get_pixbuf()
        return icon
| [
"def",
"loadIcon",
"(",
"stock_item_id",
")",
":",
"stock_item",
"=",
"getattr",
"(",
"gtk",
",",
"stock_item_id",
")",
"local_icon",
"=",
"os",
".",
"path",
".",
"join",
"(",
"GUI_DATA_PATH",
",",
"'icons'",
",",
"'16'",
",",
"(",
"'%s.png'",
"%",
"stock_item",
")",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"local_icon",
")",
":",
"im",
"=",
"gtk",
".",
"Image",
"(",
")",
"im",
".",
"set_from_file",
"(",
"local_icon",
")",
"im",
".",
"show",
"(",
")",
"return",
"im",
".",
"get_pixbuf",
"(",
")",
"else",
":",
"icon_theme",
"=",
"gtk",
".",
"IconTheme",
"(",
")",
"try",
":",
"icon",
"=",
"icon_theme",
".",
"load_icon",
"(",
"stock_item",
",",
"16",
",",
"(",
")",
")",
"except",
":",
"icon",
"=",
"loadImage",
"(",
"'missing-image.png'",
")",
".",
"get_pixbuf",
"(",
")",
"return",
"icon"
] | loads an icon to show it in the gui . | train | false |
186 | @qutescheme.add_handler('settings', backend=usertypes.Backend.QtWebKit)
def qute_settings(_url):
    config_getter = functools.partial(objreg.get('config').get, raw=True)
    html = jinja.render('settings.html', title='settings', config=configdata, confget=config_getter)
    return ('text/html', html)
| [
"@",
"qutescheme",
".",
"add_handler",
"(",
"'settings'",
",",
"backend",
"=",
"usertypes",
".",
"Backend",
".",
"QtWebKit",
")",
"def",
"qute_settings",
"(",
"_url",
")",
":",
"config_getter",
"=",
"functools",
".",
"partial",
"(",
"objreg",
".",
"get",
"(",
"'config'",
")",
".",
"get",
",",
"raw",
"=",
"True",
")",
"html",
"=",
"jinja",
".",
"render",
"(",
"'settings.html'",
",",
"title",
"=",
"'settings'",
",",
"config",
"=",
"configdata",
",",
"confget",
"=",
"config_getter",
")",
"return",
"(",
"'text/html'",
",",
"html",
")"
] | handler for qute:settings . | train | false |
187 | def splitdrive(p):
    return ('', p)
| [
"def",
"splitdrive",
"(",
"p",
")",
":",
"return",
"(",
"''",
",",
"p",
")"
] | split a pathname into drive/unc sharepoint and relative path specifiers . | train | false |
188 | def get_credit_providers(providers_list=None):
    return CreditProvider.get_credit_providers(providers_list=providers_list)
| [
"def",
"get_credit_providers",
"(",
"providers_list",
"=",
"None",
")",
":",
"return",
"CreditProvider",
".",
"get_credit_providers",
"(",
"providers_list",
"=",
"providers_list",
")"
] | retrieve all available credit providers or filter on given providers_list . | train | false |
189 | def plot_matches(im1, im2, locs1, locs2, matchscores, show_below=True):
    im3 = appendimages(im1, im2)
    if show_below:
        im3 = vstack((im3, im3))
    imshow(im3)
    cols1 = im1.shape[1]
    for (i, m) in enumerate(matchscores):
        if (m > 0):
            plot([locs1[i][0], (locs2[m][0] + cols1)], [locs1[i][1], locs2[m][1]], 'c')
    axis('off')
| [
"def",
"plot_matches",
"(",
"im1",
",",
"im2",
",",
"locs1",
",",
"locs2",
",",
"matchscores",
",",
"show_below",
"=",
"True",
")",
":",
"im3",
"=",
"appendimages",
"(",
"im1",
",",
"im2",
")",
"if",
"show_below",
":",
"im3",
"=",
"vstack",
"(",
"(",
"im3",
",",
"im3",
")",
")",
"imshow",
"(",
"im3",
")",
"cols1",
"=",
"im1",
".",
"shape",
"[",
"1",
"]",
"for",
"(",
"i",
",",
"m",
")",
"in",
"enumerate",
"(",
"matchscores",
")",
":",
"if",
"(",
"m",
">",
"0",
")",
":",
"plot",
"(",
"[",
"locs1",
"[",
"i",
"]",
"[",
"0",
"]",
",",
"(",
"locs2",
"[",
"m",
"]",
"[",
"0",
"]",
"+",
"cols1",
")",
"]",
",",
"[",
"locs1",
"[",
"i",
"]",
"[",
"1",
"]",
",",
"locs2",
"[",
"m",
"]",
"[",
"1",
"]",
"]",
",",
"'c'",
")",
"axis",
"(",
"'off'",
")"
] | show a figure with lines joining the accepted matches input: im1 . | train | false |
192 | def stop_app_instance(app_name, port):
    if (not misc.is_app_name_valid(app_name)):
        logging.error(('Unable to kill app process %s on port %d because of invalid name for application' % (app_name, int(port))))
        return False
    logging.info(('Stopping application %s' % app_name))
    watch = ((('app___' + app_name) + '-') + str(port))
    if (not monit_interface.stop(watch, is_group=False)):
        logging.error('Unable to stop application server for app {0} on port {1}'.format(app_name, port))
        return False
    monit_config_file = '{}/appscale-{}.cfg'.format(MONIT_CONFIG_DIR, watch)
    try:
        os.remove(monit_config_file)
    except OSError as os_error:
        logging.error('Error deleting {0}'.format(monit_config_file))
    return True
| [
"def",
"stop_app_instance",
"(",
"app_name",
",",
"port",
")",
":",
"if",
"(",
"not",
"misc",
".",
"is_app_name_valid",
"(",
"app_name",
")",
")",
":",
"logging",
".",
"error",
"(",
"(",
"'Unable to kill app process %s on port %d because of invalid name for application'",
"%",
"(",
"app_name",
",",
"int",
"(",
"port",
")",
")",
")",
")",
"return",
"False",
"logging",
".",
"info",
"(",
"(",
"'Stopping application %s'",
"%",
"app_name",
")",
")",
"watch",
"=",
"(",
"(",
"(",
"'app___'",
"+",
"app_name",
")",
"+",
"'-'",
")",
"+",
"str",
"(",
"port",
")",
")",
"if",
"(",
"not",
"monit_interface",
".",
"stop",
"(",
"watch",
",",
"is_group",
"=",
"False",
")",
")",
":",
"logging",
".",
"error",
"(",
"'Unable to stop application server for app {0} on port {1}'",
".",
"format",
"(",
"app_name",
",",
"port",
")",
")",
"return",
"False",
"monit_config_file",
"=",
"'{}/appscale-{}.cfg'",
".",
"format",
"(",
"MONIT_CONFIG_DIR",
",",
"watch",
")",
"try",
":",
"os",
".",
"remove",
"(",
"monit_config_file",
")",
"except",
"OSError",
"as",
"os_error",
":",
"logging",
".",
"error",
"(",
"'Error deleting {0}'",
".",
"format",
"(",
"monit_config_file",
")",
")",
"return",
"True"
] | stops a google app engine application process instance on current machine . | train | false |
193 | def _filter_schemas(schemas, schema_tables, exclude_table_columns):
    return [_filter_schema(s, schema_tables, exclude_table_columns) for s in schemas]
| [
"def",
"_filter_schemas",
"(",
"schemas",
",",
"schema_tables",
",",
"exclude_table_columns",
")",
":",
"return",
"[",
"_filter_schema",
"(",
"s",
",",
"schema_tables",
",",
"exclude_table_columns",
")",
"for",
"s",
"in",
"schemas",
"]"
] | wrapper method for _filter_schema to filter multiple schemas . | train | true |
194 | def echo_class(klass, write=sys.stdout.write):
    for (_, method) in inspect.getmembers(klass, inspect.ismethod):
        echo_instancemethod(klass, method, write)
    for (_, fn) in inspect.getmembers(klass, inspect.isfunction):
        if is_static_method(fn, klass):
            setattr(klass, name(fn), staticmethod(echo(fn, write)))
        else:
            echo_instancemethod(klass, fn, write)
| [
"def",
"echo_class",
"(",
"klass",
",",
"write",
"=",
"sys",
".",
"stdout",
".",
"write",
")",
":",
"for",
"(",
"_",
",",
"method",
")",
"in",
"inspect",
".",
"getmembers",
"(",
"klass",
",",
"inspect",
".",
"ismethod",
")",
":",
"echo_instancemethod",
"(",
"klass",
",",
"method",
",",
"write",
")",
"for",
"(",
"_",
",",
"fn",
")",
"in",
"inspect",
".",
"getmembers",
"(",
"klass",
",",
"inspect",
".",
"isfunction",
")",
":",
"if",
"is_static_method",
"(",
"fn",
",",
"klass",
")",
":",
"setattr",
"(",
"klass",
",",
"name",
"(",
"fn",
")",
",",
"staticmethod",
"(",
"echo",
"(",
"fn",
",",
"write",
")",
")",
")",
"else",
":",
"echo_instancemethod",
"(",
"klass",
",",
"fn",
",",
"write",
")"
] | echo calls to class methods and static functions . | train | false |
195 | def _clear_namespace():
    ok_names = set(default_backend.__dict__)
    ok_names.update(['gl2', 'glplus'])
    NS = globals()
    for name in list(NS.keys()):
        if name.lower().startswith('gl'):
            if (name not in ok_names):
                del NS[name]
| [
"def",
"_clear_namespace",
"(",
")",
":",
"ok_names",
"=",
"set",
"(",
"default_backend",
".",
"__dict__",
")",
"ok_names",
".",
"update",
"(",
"[",
"'gl2'",
",",
"'glplus'",
"]",
")",
"NS",
"=",
"globals",
"(",
")",
"for",
"name",
"in",
"list",
"(",
"NS",
".",
"keys",
"(",
")",
")",
":",
"if",
"name",
".",
"lower",
"(",
")",
".",
"startswith",
"(",
"'gl'",
")",
":",
"if",
"(",
"name",
"not",
"in",
"ok_names",
")",
":",
"del",
"NS",
"[",
"name",
"]"
] | clear names that are not part of the strict es api . | train | true |
196 | def iterServices(xrd_tree):
    xrd = getYadisXRD(xrd_tree)
    return prioSort(xrd.findall(service_tag))
| [
"def",
"iterServices",
"(",
"xrd_tree",
")",
":",
"xrd",
"=",
"getYadisXRD",
"(",
"xrd_tree",
")",
"return",
"prioSort",
"(",
"xrd",
".",
"findall",
"(",
"service_tag",
")",
")"
] | return an iterable over the service elements in the yadis xrd sorted by priority . | train | false |
197 | def _download_restricted(url, filename, age):
    params = {u'age_limit': age, u'skip_download': True, u'writeinfojson': True, u'outtmpl': u'%(id)s.%(ext)s'}
    ydl = YoutubeDL(params)
    ydl.add_default_info_extractors()
    json_filename = (os.path.splitext(filename)[0] + u'.info.json')
    try_rm(json_filename)
    ydl.download([url])
    res = os.path.exists(json_filename)
    try_rm(json_filename)
    return res
| [
"def",
"_download_restricted",
"(",
"url",
",",
"filename",
",",
"age",
")",
":",
"params",
"=",
"{",
"u'age_limit'",
":",
"age",
",",
"u'skip_download'",
":",
"True",
",",
"u'writeinfojson'",
":",
"True",
",",
"u'outtmpl'",
":",
"u'%(id)s.%(ext)s'",
"}",
"ydl",
"=",
"YoutubeDL",
"(",
"params",
")",
"ydl",
".",
"add_default_info_extractors",
"(",
")",
"json_filename",
"=",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"filename",
")",
"[",
"0",
"]",
"+",
"u'.info.json'",
")",
"try_rm",
"(",
"json_filename",
")",
"ydl",
".",
"download",
"(",
"[",
"url",
"]",
")",
"res",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"json_filename",
")",
"try_rm",
"(",
"json_filename",
")",
"return",
"res"
] | returns true if the file has been downloaded . | train | false |
200 | def _GetLines(line_strings):
    lines = []
    for line_string in line_strings:
        line = list(map(int, line_string.split('-', 1)))
        if (line[0] < 1):
            raise errors.YapfError(('invalid start of line range: %r' % line))
        if (line[0] > line[1]):
            raise errors.YapfError('end comes before start in line range: %r', line)
        lines.append(tuple(line))
    return lines
| [
"def",
"_GetLines",
"(",
"line_strings",
")",
":",
"lines",
"=",
"[",
"]",
"for",
"line_string",
"in",
"line_strings",
":",
"line",
"=",
"list",
"(",
"map",
"(",
"int",
",",
"line_string",
".",
"split",
"(",
"'-'",
",",
"1",
")",
")",
")",
"if",
"(",
"line",
"[",
"0",
"]",
"<",
"1",
")",
":",
"raise",
"errors",
".",
"YapfError",
"(",
"(",
"'invalid start of line range: %r'",
"%",
"line",
")",
")",
"if",
"(",
"line",
"[",
"0",
"]",
">",
"line",
"[",
"1",
"]",
")",
":",
"raise",
"errors",
".",
"YapfError",
"(",
"'end comes before start in line range: %r'",
",",
"line",
")",
"lines",
".",
"append",
"(",
"tuple",
"(",
"line",
")",
")",
"return",
"lines"
] | parses the start and end lines from a line string like start-end . | train | false |
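A happy-path usage sketch for `_GetLines`; malformed ranges raise `errors.YapfError` instead:

```python
print(_GetLines(['1-10', '20-25']))  # [(1, 10), (20, 25)]
```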
202 | @require_global_staff
@require_POST
def enable_certificate_generation(request, course_id=None):
    course_key = CourseKey.from_string(course_id)
    is_enabled = (request.POST.get('certificates-enabled', 'false') == 'true')
    certs_api.set_cert_generation_enabled(course_key, is_enabled)
    return redirect(_instructor_dash_url(course_key, section='certificates'))
| [
"@",
"require_global_staff",
"@",
"require_POST",
"def",
"enable_certificate_generation",
"(",
"request",
",",
"course_id",
"=",
"None",
")",
":",
"course_key",
"=",
"CourseKey",
".",
"from_string",
"(",
"course_id",
")",
"is_enabled",
"=",
"(",
"request",
".",
"POST",
".",
"get",
"(",
"'certificates-enabled'",
",",
"'false'",
")",
"==",
"'true'",
")",
"certs_api",
".",
"set_cert_generation_enabled",
"(",
"course_key",
",",
"is_enabled",
")",
"return",
"redirect",
"(",
"_instructor_dash_url",
"(",
"course_key",
",",
"section",
"=",
"'certificates'",
")",
")"
] | enable/disable self-generated certificates for a course . | train | false |
204 | def compute_g(n):
    a = compute_a((2 * n))
    g = []
    for k in range(n):
        g.append(((mp.sqrt(2) * mp.rf(0.5, k)) * a[(2 * k)]))
    return g
| [
"def",
"compute_g",
"(",
"n",
")",
":",
"a",
"=",
"compute_a",
"(",
"(",
"2",
"*",
"n",
")",
")",
"g",
"=",
"[",
"]",
"for",
"k",
"in",
"range",
"(",
"n",
")",
":",
"g",
".",
"append",
"(",
"(",
"(",
"mp",
".",
"sqrt",
"(",
"2",
")",
"*",
"mp",
".",
"rf",
"(",
"0.5",
",",
"k",
")",
")",
"*",
"a",
"[",
"(",
"2",
"*",
"k",
")",
"]",
")",
")",
"return",
"g"
] | g_k from dlmf 5 . | train | false |
205 | def set_special(user, special, cmd):
    lst = list_tab(user)
    for cron in lst['special']:
        if ((special == cron['spec']) and (cmd == cron['cmd'])):
            return 'present'
    spec = {'spec': special, 'cmd': cmd}
    lst['special'].append(spec)
    comdat = _write_cron_lines(user, _render_tab(lst))
    if comdat['retcode']:
        return comdat['stderr']
    return 'new'
| [
"def",
"set_special",
"(",
"user",
",",
"special",
",",
"cmd",
")",
":",
"lst",
"=",
"list_tab",
"(",
"user",
")",
"for",
"cron",
"in",
"lst",
"[",
"'special'",
"]",
":",
"if",
"(",
"(",
"special",
"==",
"cron",
"[",
"'spec'",
"]",
")",
"and",
"(",
"cmd",
"==",
"cron",
"[",
"'cmd'",
"]",
")",
")",
":",
"return",
"'present'",
"spec",
"=",
"{",
"'spec'",
":",
"special",
",",
"'cmd'",
":",
"cmd",
"}",
"lst",
"[",
"'special'",
"]",
".",
"append",
"(",
"spec",
")",
"comdat",
"=",
"_write_cron_lines",
"(",
"user",
",",
"_render_tab",
"(",
"lst",
")",
")",
"if",
"comdat",
"[",
"'retcode'",
"]",
":",
"return",
"comdat",
"[",
"'stderr'",
"]",
"return",
"'new'"
] | set up a special command in the crontab . | train | false |
206 | def set_diff_chunk_generator_class(renderer):
    assert renderer
    globals()[u'_generator'] = renderer
| [
"def",
"set_diff_chunk_generator_class",
"(",
"renderer",
")",
":",
"assert",
"renderer",
"globals",
"(",
")",
"[",
"u'_generator'",
"]",
"=",
"renderer"
] | sets the diffchunkgenerator class used for generating chunks . | train | false |
207 | def _should_use_proxy(url, no_proxy=None):
    if (no_proxy is None):
        no_proxy_effective = os.environ.get('no_proxy', '')
    else:
        no_proxy_effective = no_proxy
    urlObj = urlparse_.urlparse(_url_as_string(url))
    for np in [h.strip() for h in no_proxy_effective.split(',')]:
        if (urlObj.hostname == np):
            return False
    return True
| [
"def",
"_should_use_proxy",
"(",
"url",
",",
"no_proxy",
"=",
"None",
")",
":",
"if",
"(",
"no_proxy",
"is",
"None",
")",
":",
"no_proxy_effective",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'no_proxy'",
",",
"''",
")",
"else",
":",
"no_proxy_effective",
"=",
"no_proxy",
"urlObj",
"=",
"urlparse_",
".",
"urlparse",
"(",
"_url_as_string",
"(",
"url",
")",
")",
"for",
"np",
"in",
"[",
"h",
".",
"strip",
"(",
")",
"for",
"h",
"in",
"no_proxy_effective",
".",
"split",
"(",
"','",
")",
"]",
":",
"if",
"(",
"urlObj",
".",
"hostname",
"==",
"np",
")",
":",
"return",
"False",
"return",
"True"
] | determines whether a proxy should be used to open a connection to the specified url . | train | false |
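A sketch for `_should_use_proxy` under two assumptions: `urlparse_` is `urllib.parse` and `_url_as_string` is a plain `str()` coercion (both helpers live elsewhere in the snippet's module):

```python
import urllib.parse as urlparse_  # assumed binding
_url_as_string = str              # assumed helper

print(_should_use_proxy('http://internal.host/x', no_proxy='internal.host'))  # False
print(_should_use_proxy('http://example.com/x', no_proxy='internal.host'))    # True
```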
208 | def trim_line(line, column=0):
    line = line.strip(u'\n')
    ll = len(line)
    if (ll <= 150):
        return line
    if (column > ll):
        column = ll
    start = max((column - 60), 0)
    if (start < 5):
        start = 0
    end = min((start + 140), ll)
    if (end > (ll - 5)):
        end = ll
    if (end == ll):
        start = max((end - 140), 0)
    line = line[start:end]
    if (end < ll):
        line += u' {snip}'
    if (start > 0):
        line = (u'{snip} ' + line)
    return line
| [
"def",
"trim_line",
"(",
"line",
",",
"column",
"=",
"0",
")",
":",
"line",
"=",
"line",
".",
"strip",
"(",
"u'\\n'",
")",
"ll",
"=",
"len",
"(",
"line",
")",
"if",
"(",
"ll",
"<=",
"150",
")",
":",
"return",
"line",
"if",
"(",
"column",
">",
"ll",
")",
":",
"column",
"=",
"ll",
"start",
"=",
"max",
"(",
"(",
"column",
"-",
"60",
")",
",",
"0",
")",
"if",
"(",
"start",
"<",
"5",
")",
":",
"start",
"=",
"0",
"end",
"=",
"min",
"(",
"(",
"start",
"+",
"140",
")",
",",
"ll",
")",
"if",
"(",
"end",
">",
"(",
"ll",
"-",
"5",
")",
")",
":",
"end",
"=",
"ll",
"if",
"(",
"end",
"==",
"ll",
")",
":",
"start",
"=",
"max",
"(",
"(",
"end",
"-",
"140",
")",
",",
"0",
")",
"line",
"=",
"line",
"[",
"start",
":",
"end",
"]",
"if",
"(",
"end",
"<",
"ll",
")",
":",
"line",
"+=",
"u' {snip}'",
"if",
"(",
"start",
">",
"0",
")",
":",
"line",
"=",
"(",
"u'{snip} '",
"+",
"line",
")",
"return",
"line"
] | trims a line down to a goal of 140 characters . | train | false |
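A usage sketch for `trim_line`: lines longer than 150 characters are windowed to about 140 characters around `column`, with `{snip}` markers on whichever ends were cut:

```python
long_line = 'x' * 200
print(trim_line(long_line)[-7:])             # ' {snip}'  (tail was cut)
print(trim_line(long_line, column=190)[:7])  # '{snip} '  (head was cut)
```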
210 | def _task_info_format(task_info_ref):
    if (task_info_ref is None):
        return {}
    return {'task_id': task_info_ref['task_id'], 'input': task_info_ref['input'], 'result': task_info_ref['result'], 'message': task_info_ref['message']}
| [
"def",
"_task_info_format",
"(",
"task_info_ref",
")",
":",
"if",
"(",
"task_info_ref",
"is",
"None",
")",
":",
"return",
"{",
"}",
"return",
"{",
"'task_id'",
":",
"task_info_ref",
"[",
"'task_id'",
"]",
",",
"'input'",
":",
"task_info_ref",
"[",
"'input'",
"]",
",",
"'result'",
":",
"task_info_ref",
"[",
"'result'",
"]",
",",
"'message'",
":",
"task_info_ref",
"[",
"'message'",
"]",
"}"
] | format a task info ref for consumption outside of this module . | train | false |
211 | def pandas_read_text(reader, b, header, kwargs, dtypes=None, columns=None, write_header=True, enforce=False):
    bio = BytesIO()
    if (write_header and (not b.startswith(header.rstrip()))):
        bio.write(header)
    bio.write(b)
    bio.seek(0)
    df = reader(bio, **kwargs)
    if dtypes:
        coerce_dtypes(df, dtypes)
    if (enforce and columns and (list(df.columns) != list(columns))):
        raise ValueError('Columns do not match', df.columns, columns)
    elif columns:
        df.columns = columns
    return df
| [
"def",
"pandas_read_text",
"(",
"reader",
",",
"b",
",",
"header",
",",
"kwargs",
",",
"dtypes",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"write_header",
"=",
"True",
",",
"enforce",
"=",
"False",
")",
":",
"bio",
"=",
"BytesIO",
"(",
")",
"if",
"(",
"write_header",
"and",
"(",
"not",
"b",
".",
"startswith",
"(",
"header",
".",
"rstrip",
"(",
")",
")",
")",
")",
":",
"bio",
".",
"write",
"(",
"header",
")",
"bio",
".",
"write",
"(",
"b",
")",
"bio",
".",
"seek",
"(",
"0",
")",
"df",
"=",
"reader",
"(",
"bio",
",",
"**",
"kwargs",
")",
"if",
"dtypes",
":",
"coerce_dtypes",
"(",
"df",
",",
"dtypes",
")",
"if",
"(",
"enforce",
"and",
"columns",
"and",
"(",
"list",
"(",
"df",
".",
"columns",
")",
"!=",
"list",
"(",
"columns",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Columns do not match'",
",",
"df",
".",
"columns",
",",
"columns",
")",
"elif",
"columns",
":",
"df",
".",
"columns",
"=",
"columns",
"return",
"df"
] | convert a block of bytes to a pandas dataframe parameters reader : callable pd . | train | false |
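A usage sketch for `pandas_read_text`, using `pandas.read_csv` as the reader; it assumes `BytesIO` is imported from `io` as in the snippet's home module:

```python
import pandas as pd
from io import BytesIO  # dependency of the snippet above

block = b'1,2\n3,4\n'
df = pandas_read_text(pd.read_csv, block, b'a,b\n', {})
print(list(df.columns), len(df))  # ['a', 'b'] 2
```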
212 | def _gaussian_loglik_scorer(est, X, y=None):
    precision = est.get_precision()
    (n_samples, n_features) = X.shape
    log_like = np.zeros(n_samples)
    log_like = ((-0.5) * (X * np.dot(X, precision)).sum(axis=1))
    log_like -= (0.5 * ((n_features * log((2.0 * np.pi))) - _logdet(precision)))
    out = np.mean(log_like)
    return out
| [
"def",
"_gaussian_loglik_scorer",
"(",
"est",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"precision",
"=",
"est",
".",
"get_precision",
"(",
")",
"(",
"n_samples",
",",
"n_features",
")",
"=",
"X",
".",
"shape",
"log_like",
"=",
"np",
".",
"zeros",
"(",
"n_samples",
")",
"log_like",
"=",
"(",
"(",
"-",
"0.5",
")",
"*",
"(",
"X",
"*",
"np",
".",
"dot",
"(",
"X",
",",
"precision",
")",
")",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
")",
"log_like",
"-=",
"(",
"0.5",
"*",
"(",
"(",
"n_features",
"*",
"log",
"(",
"(",
"2.0",
"*",
"np",
".",
"pi",
")",
")",
")",
"-",
"_logdet",
"(",
"precision",
")",
")",
")",
"out",
"=",
"np",
".",
"mean",
"(",
"log_like",
")",
"return",
"out"
] | compute the gaussian log likelihood of x under the model in est . | train | false |
213 | def getSSLContext():
    keyfile = os.path.join(_GAME_DIR, 'server', 'ssl.key')
    certfile = os.path.join(_GAME_DIR, 'server', 'ssl.cert')
    verify_SSL_key_and_cert(keyfile, certfile)
    return twisted_ssl.DefaultOpenSSLContextFactory(keyfile, certfile)
| [
"def",
"getSSLContext",
"(",
")",
":",
"keyfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_GAME_DIR",
",",
"'server'",
",",
"'ssl.key'",
")",
"certfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_GAME_DIR",
",",
"'server'",
",",
"'ssl.cert'",
")",
"verify_SSL_key_and_cert",
"(",
"keyfile",
",",
"certfile",
")",
"return",
"twisted_ssl",
".",
"DefaultOpenSSLContextFactory",
"(",
"keyfile",
",",
"certfile",
")"
] | this is called by the portal when creating the ssl context server-side . | train | false |
214 | def mod_aggregate(low, chunks, running):
    rules = []
    agg_enabled = ['append', 'insert']
    if (low.get('fun') not in agg_enabled):
        return low
    for chunk in chunks:
        tag = salt.utils.gen_state_tag(chunk)
        if (tag in running):
            continue
        if (chunk.get('state') == 'iptables'):
            if ('__agg__' in chunk):
                continue
            if (chunk.get('fun') != low.get('fun')):
                continue
            if (chunk not in rules):
                rules.append(chunk)
                chunk['__agg__'] = True
    if rules:
        if ('rules' in low):
            low['rules'].extend(rules)
        else:
            low['rules'] = rules
    return low
| [
"def",
"mod_aggregate",
"(",
"low",
",",
"chunks",
",",
"running",
")",
":",
"rules",
"=",
"[",
"]",
"agg_enabled",
"=",
"[",
"'append'",
",",
"'insert'",
"]",
"if",
"(",
"low",
".",
"get",
"(",
"'fun'",
")",
"not",
"in",
"agg_enabled",
")",
":",
"return",
"low",
"for",
"chunk",
"in",
"chunks",
":",
"tag",
"=",
"salt",
".",
"utils",
".",
"gen_state_tag",
"(",
"chunk",
")",
"if",
"(",
"tag",
"in",
"running",
")",
":",
"continue",
"if",
"(",
"chunk",
".",
"get",
"(",
"'state'",
")",
"==",
"'iptables'",
")",
":",
"if",
"(",
"'__agg__'",
"in",
"chunk",
")",
":",
"continue",
"if",
"(",
"chunk",
".",
"get",
"(",
"'fun'",
")",
"!=",
"low",
".",
"get",
"(",
"'fun'",
")",
")",
":",
"continue",
"if",
"(",
"chunk",
"not",
"in",
"rules",
")",
":",
"rules",
".",
"append",
"(",
"chunk",
")",
"chunk",
"[",
"'__agg__'",
"]",
"=",
"True",
"if",
"rules",
":",
"if",
"(",
"'rules'",
"in",
"low",
")",
":",
"low",
"[",
"'rules'",
"]",
".",
"extend",
"(",
"rules",
")",
"else",
":",
"low",
"[",
"'rules'",
"]",
"=",
"rules",
"return",
"low"
] | the mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data . | train | false |
215 | def es_delete_cmd(index, noinput=False, log=log):
    try:
        indexes = [name for (name, count) in get_indexes()]
    except ES_EXCEPTIONS:
        log.error('Your elasticsearch process is not running or ES_URLS is set wrong in your settings_local.py file.')
        return
    if (index not in indexes):
        log.error('Index "%s" is not a valid index.', index)
        return
    if ((index in all_read_indexes()) and (not noinput)):
        ret = raw_input(('"%s" is a read index. Are you sure you want to delete it? (yes/no) ' % index))
        if (ret != 'yes'):
            log.info('Not deleting the index.')
            return
    log.info('Deleting index "%s"...', index)
    delete_index(index)
    log.info('Done!')
| [
"def",
"es_delete_cmd",
"(",
"index",
",",
"noinput",
"=",
"False",
",",
"log",
"=",
"log",
")",
":",
"try",
":",
"indexes",
"=",
"[",
"name",
"for",
"(",
"name",
",",
"count",
")",
"in",
"get_indexes",
"(",
")",
"]",
"except",
"ES_EXCEPTIONS",
":",
"log",
".",
"error",
"(",
"'Your elasticsearch process is not running or ES_URLS is set wrong in your settings_local.py file.'",
")",
"return",
"if",
"(",
"index",
"not",
"in",
"indexes",
")",
":",
"log",
".",
"error",
"(",
"'Index \"%s\" is not a valid index.'",
",",
"index",
")",
"return",
"if",
"(",
"(",
"index",
"in",
"all_read_indexes",
"(",
")",
")",
"and",
"(",
"not",
"noinput",
")",
")",
":",
"ret",
"=",
"raw_input",
"(",
"(",
"'\"%s\" is a read index. Are you sure you want to delete it? (yes/no) '",
"%",
"index",
")",
")",
"if",
"(",
"ret",
"!=",
"'yes'",
")",
":",
"log",
".",
"info",
"(",
"'Not deleting the index.'",
")",
"return",
"log",
".",
"info",
"(",
"'Deleting index \"%s\"...'",
",",
"index",
")",
"delete_index",
"(",
"index",
")",
"log",
".",
"info",
"(",
"'Done!'",
")"
] | deletes an index . | train | false |
216 | def get_subset_from_bitstring(super_set, bitstring):
    if (len(super_set) != len(bitstring)):
        raise ValueError('The sizes of the lists are not equal')
    return [super_set[i] for (i, j) in enumerate(bitstring) if (bitstring[i] == '1')]
| [
"def",
"get_subset_from_bitstring",
"(",
"super_set",
",",
"bitstring",
")",
":",
"if",
"(",
"len",
"(",
"super_set",
")",
"!=",
"len",
"(",
"bitstring",
")",
")",
":",
"raise",
"ValueError",
"(",
"'The sizes of the lists are not equal'",
")",
"return",
"[",
"super_set",
"[",
"i",
"]",
"for",
"(",
"i",
",",
"j",
")",
"in",
"enumerate",
"(",
"bitstring",
")",
"if",
"(",
"bitstring",
"[",
"i",
"]",
"==",
"'1'",
")",
"]"
] | gets the subset defined by the bitstring . | train | false |
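A usage sketch for `get_subset_from_bitstring`: a `'1'` bit selects the element at the same index:

```python
print(get_subset_from_bitstring(['a', 'b', 'c', 'd'], '1010'))  # ['a', 'c']
```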
217 | def get_pending_computer_name():
    current = get_computer_name()
    pending = __salt__['reg.read_value']('HKLM', 'SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters', 'NV Hostname')['vdata']
    if pending:
        return (pending if (pending != current) else None)
    return False
| [
"def",
"get_pending_computer_name",
"(",
")",
":",
"current",
"=",
"get_computer_name",
"(",
")",
"pending",
"=",
"__salt__",
"[",
"'reg.read_value'",
"]",
"(",
"'HKLM'",
",",
"'SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters'",
",",
"'NV Hostname'",
")",
"[",
"'vdata'",
"]",
"if",
"pending",
":",
"return",
"(",
"pending",
"if",
"(",
"pending",
"!=",
"current",
")",
"else",
"None",
")",
"return",
"False"
] | get a pending computer name . | train | false |
220 | def migrate_registrations_q5_metadata(schema):
    registrations = Node.find((Q('is_registration', 'eq', True) & Q('registered_schema', 'eq', schema)))
    total_reg = registrations.count()
    logger.info('Examining {} registrations for q5 metadata'.format(total_reg))
    reg_count = 0
    for reg in registrations:
        reg_count += 1
        if reg.registered_meta.get(schema._id, {}).get('q5', {}).get('value', {}):
            reg.registered_meta[schema._id]['q5']['value'] = reg.registered_meta[schema._id]['q5']['value'].rstrip()
            reg.save()
            logger.info('{}/{} Migrated q5 response for {}'.format(reg_count, total_reg, reg._id))
        else:
            logger.info('{}/{} q5 not answered. No change needed for {}.'.format(reg_count, total_reg, reg._id))
| [
"def",
"migrate_registrations_q5_metadata",
"(",
"schema",
")",
":",
"registrations",
"=",
"Node",
".",
"find",
"(",
"(",
"Q",
"(",
"'is_registration'",
",",
"'eq'",
",",
"True",
")",
"&",
"Q",
"(",
"'registered_schema'",
",",
"'eq'",
",",
"schema",
")",
")",
")",
"total_reg",
"=",
"registrations",
".",
"count",
"(",
")",
"logger",
".",
"info",
"(",
"'Examining {} registrations for q5 metadata'",
".",
"format",
"(",
"total_reg",
")",
")",
"reg_count",
"=",
"0",
"for",
"reg",
"in",
"registrations",
":",
"reg_count",
"+=",
"1",
"if",
"reg",
".",
"registered_meta",
".",
"get",
"(",
"schema",
".",
"_id",
",",
"{",
"}",
")",
".",
"get",
"(",
"'q5'",
",",
"{",
"}",
")",
".",
"get",
"(",
"'value'",
",",
"{",
"}",
")",
":",
"reg",
".",
"registered_meta",
"[",
"schema",
".",
"_id",
"]",
"[",
"'q5'",
"]",
"[",
"'value'",
"]",
"=",
"reg",
".",
"registered_meta",
"[",
"schema",
".",
"_id",
"]",
"[",
"'q5'",
"]",
"[",
"'value'",
"]",
".",
"rstrip",
"(",
")",
"reg",
".",
"save",
"(",
")",
"logger",
".",
"info",
"(",
"'{}/{} Migrated q5 response for {}'",
".",
"format",
"(",
"reg_count",
",",
"total_reg",
",",
"reg",
".",
"_id",
")",
")",
"else",
":",
"logger",
".",
"info",
"(",
"'{}/{} q5 not answered. No change needed for {}.'",
".",
"format",
"(",
"reg_count",
",",
"total_reg",
",",
"reg",
".",
"_id",
")",
")"
] | finds prereg challenge registrations whose registered_meta includes q5 and strips trailing whitespace from the q5 value . | train | false |
221 | def get_random_user_agent():
return random.choice(user_agents_list)
| [
"def",
"get_random_user_agent",
"(",
")",
":",
"return",
"random",
".",
"choice",
"(",
"user_agents_list",
")"
] | get a random user agent string . | train | false |
223 | def get_preferred_submodules():
if ('submodules' in modules_db):
return modules_db['submodules']
mods = ['numpy', 'scipy', 'sympy', 'pandas', 'networkx', 'statsmodels', 'matplotlib', 'sklearn', 'skimage', 'mpmath', 'os', 'PIL', 'OpenGL', 'array', 'audioop', 'binascii', 'cPickle', 'cStringIO', 'cmath', 'collections', 'datetime', 'errno', 'exceptions', 'gc', 'imageop', 'imp', 'itertools', 'marshal', 'math', 'mmap', 'msvcrt', 'nt', 'operator', 'parser', 'rgbimg', 'signal', 'strop', 'sys', 'thread', 'time', 'wx', 'xxsubtype', 'zipimport', 'zlib', 'nose', 'PyQt4', 'PySide', 'os.path']
submodules = []
for m in mods:
submods = get_submodules(m)
submodules += submods
modules_db['submodules'] = submodules
return submodules
| [
"def",
"get_preferred_submodules",
"(",
")",
":",
"if",
"(",
"'submodules'",
"in",
"modules_db",
")",
":",
"return",
"modules_db",
"[",
"'submodules'",
"]",
"mods",
"=",
"[",
"'numpy'",
",",
"'scipy'",
",",
"'sympy'",
",",
"'pandas'",
",",
"'networkx'",
",",
"'statsmodels'",
",",
"'matplotlib'",
",",
"'sklearn'",
",",
"'skimage'",
",",
"'mpmath'",
",",
"'os'",
",",
"'PIL'",
",",
"'OpenGL'",
",",
"'array'",
",",
"'audioop'",
",",
"'binascii'",
",",
"'cPickle'",
",",
"'cStringIO'",
",",
"'cmath'",
",",
"'collections'",
",",
"'datetime'",
",",
"'errno'",
",",
"'exceptions'",
",",
"'gc'",
",",
"'imageop'",
",",
"'imp'",
",",
"'itertools'",
",",
"'marshal'",
",",
"'math'",
",",
"'mmap'",
",",
"'msvcrt'",
",",
"'nt'",
",",
"'operator'",
",",
"'parser'",
",",
"'rgbimg'",
",",
"'signal'",
",",
"'strop'",
",",
"'sys'",
",",
"'thread'",
",",
"'time'",
",",
"'wx'",
",",
"'xxsubtype'",
",",
"'zipimport'",
",",
"'zlib'",
",",
"'nose'",
",",
"'PyQt4'",
",",
"'PySide'",
",",
"'os.path'",
"]",
"submodules",
"=",
"[",
"]",
"for",
"m",
"in",
"mods",
":",
"submods",
"=",
"get_submodules",
"(",
"m",
")",
"submodules",
"+=",
"submods",
"modules_db",
"[",
"'submodules'",
"]",
"=",
"submodules",
"return",
"submodules"
] | get all submodules of the main scientific modules and other modules of interest . | train | false |
224 | @task
def mongorestore(ctx, path, drop=False):
db = settings.DB_NAME
port = settings.DB_PORT
cmd = 'mongorestore --db {db} --port {port}'.format(db=db, port=port, pty=True)
if settings.DB_USER:
cmd += ' --username {0}'.format(settings.DB_USER)
if settings.DB_PASS:
cmd += ' --password {0}'.format(settings.DB_PASS)
if drop:
cmd += ' --drop'
cmd += (' ' + path)
ctx.run(cmd, echo=True)
| [
"@",
"task",
"def",
"mongorestore",
"(",
"ctx",
",",
"path",
",",
"drop",
"=",
"False",
")",
":",
"db",
"=",
"settings",
".",
"DB_NAME",
"port",
"=",
"settings",
".",
"DB_PORT",
"cmd",
"=",
"'mongorestore --db {db} --port {port}'",
".",
"format",
"(",
"db",
"=",
"db",
",",
"port",
"=",
"port",
",",
"pty",
"=",
"True",
")",
"if",
"settings",
".",
"DB_USER",
":",
"cmd",
"+=",
"' --username {0}'",
".",
"format",
"(",
"settings",
".",
"DB_USER",
")",
"if",
"settings",
".",
"DB_PASS",
":",
"cmd",
"+=",
"' --password {0}'",
".",
"format",
"(",
"settings",
".",
"DB_PASS",
")",
"if",
"drop",
":",
"cmd",
"+=",
"' --drop'",
"cmd",
"+=",
"(",
"' '",
"+",
"path",
")",
"ctx",
".",
"run",
"(",
"cmd",
",",
"echo",
"=",
"True",
")"
] | restores the running osf database with the contents of the database at the location given by its argument . | train | false |
225 | def get_permission_cache(user, key):
from django.core.cache import cache
return cache.get(get_cache_key(user, key), version=get_cache_permission_version())
| [
"def",
"get_permission_cache",
"(",
"user",
",",
"key",
")",
":",
"from",
"django",
".",
"core",
".",
"cache",
"import",
"cache",
"return",
"cache",
".",
"get",
"(",
"get_cache_key",
"(",
"user",
",",
"key",
")",
",",
"version",
"=",
"get_cache_permission_version",
"(",
")",
")"
] | helper for reading values from cache . | train | false |
226 | def autolevel_percentile(image, selem, out=None, mask=None, shift_x=False, shift_y=False, p0=0, p1=1):
return _apply(percentile_cy._autolevel, image, selem, out=out, mask=mask, shift_x=shift_x, shift_y=shift_y, p0=p0, p1=p1)
| [
"def",
"autolevel_percentile",
"(",
"image",
",",
"selem",
",",
"out",
"=",
"None",
",",
"mask",
"=",
"None",
",",
"shift_x",
"=",
"False",
",",
"shift_y",
"=",
"False",
",",
"p0",
"=",
"0",
",",
"p1",
"=",
"1",
")",
":",
"return",
"_apply",
"(",
"percentile_cy",
".",
"_autolevel",
",",
"image",
",",
"selem",
",",
"out",
"=",
"out",
",",
"mask",
"=",
"mask",
",",
"shift_x",
"=",
"shift_x",
",",
"shift_y",
"=",
"shift_y",
",",
"p0",
"=",
"p0",
",",
"p1",
"=",
"p1",
")"
] | return greyscale local autolevel of an image . | train | false |
227 | @addon_view
@non_atomic_requests
def usage_series(request, addon, group, start, end, format):
date_range = check_series_params_or_404(group, start, end, format)
check_stats_permission(request, addon)
series = get_series((ThemeUserCount if (addon.type == amo.ADDON_PERSONA) else UpdateCount), addon=addon.id, date__range=date_range)
if (format == 'csv'):
return render_csv(request, addon, series, ['date', 'count'])
elif (format == 'json'):
return render_json(request, addon, series)
| [
"@",
"addon_view",
"@",
"non_atomic_requests",
"def",
"usage_series",
"(",
"request",
",",
"addon",
",",
"group",
",",
"start",
",",
"end",
",",
"format",
")",
":",
"date_range",
"=",
"check_series_params_or_404",
"(",
"group",
",",
"start",
",",
"end",
",",
"format",
")",
"check_stats_permission",
"(",
"request",
",",
"addon",
")",
"series",
"=",
"get_series",
"(",
"(",
"ThemeUserCount",
"if",
"(",
"addon",
".",
"type",
"==",
"amo",
".",
"ADDON_PERSONA",
")",
"else",
"UpdateCount",
")",
",",
"addon",
"=",
"addon",
".",
"id",
",",
"date__range",
"=",
"date_range",
")",
"if",
"(",
"format",
"==",
"'csv'",
")",
":",
"return",
"render_csv",
"(",
"request",
",",
"addon",
",",
"series",
",",
"[",
"'date'",
",",
"'count'",
"]",
")",
"elif",
"(",
"format",
"==",
"'json'",
")",
":",
"return",
"render_json",
"(",
"request",
",",
"addon",
",",
"series",
")"
] | generate adu counts grouped by group in format . | train | false |
228 | def test_ada_fit():
ada = ADASYN(random_state=RND_SEED)
ada.fit(X, Y)
assert_equal(ada.min_c_, 0)
assert_equal(ada.maj_c_, 1)
assert_equal(ada.stats_c_[0], 8)
assert_equal(ada.stats_c_[1], 12)
| [
"def",
"test_ada_fit",
"(",
")",
":",
"ada",
"=",
"ADASYN",
"(",
"random_state",
"=",
"RND_SEED",
")",
"ada",
".",
"fit",
"(",
"X",
",",
"Y",
")",
"assert_equal",
"(",
"ada",
".",
"min_c_",
",",
"0",
")",
"assert_equal",
"(",
"ada",
".",
"maj_c_",
",",
"1",
")",
"assert_equal",
"(",
"ada",
".",
"stats_c_",
"[",
"0",
"]",
",",
"8",
")",
"assert_equal",
"(",
"ada",
".",
"stats_c_",
"[",
"1",
"]",
",",
"12",
")"
] | test the fitting method . | train | false |
229 | def render_modal_workflow(request, html_template, js_template, template_vars=None):
response_keyvars = []
if html_template:
html = render_to_string(html_template, (template_vars or {}), request=request)
response_keyvars.append((u"'html': %s" % json.dumps(html)))
if js_template:
js = render_to_string(js_template, (template_vars or {}), request=request)
response_keyvars.append((u"'onload': %s" % js))
response_text = (u'{%s}' % u','.join(response_keyvars))
return HttpResponse(response_text, content_type=u'text/javascript')
| [
"def",
"render_modal_workflow",
"(",
"request",
",",
"html_template",
",",
"js_template",
",",
"template_vars",
"=",
"None",
")",
":",
"response_keyvars",
"=",
"[",
"]",
"if",
"html_template",
":",
"html",
"=",
"render_to_string",
"(",
"html_template",
",",
"(",
"template_vars",
"or",
"{",
"}",
")",
",",
"request",
"=",
"request",
")",
"response_keyvars",
".",
"append",
"(",
"(",
"u\"'html': %s\"",
"%",
"json",
".",
"dumps",
"(",
"html",
")",
")",
")",
"if",
"js_template",
":",
"js",
"=",
"render_to_string",
"(",
"js_template",
",",
"(",
"template_vars",
"or",
"{",
"}",
")",
",",
"request",
"=",
"request",
")",
"response_keyvars",
".",
"append",
"(",
"(",
"u\"'onload': %s\"",
"%",
"js",
")",
")",
"response_text",
"=",
"(",
"u'{%s}'",
"%",
"u','",
".",
"join",
"(",
"response_keyvars",
")",
")",
"return",
"HttpResponse",
"(",
"response_text",
",",
"content_type",
"=",
"u'text/javascript'",
")"
] | render a response consisting of an html chunk and a js onload chunk in the format required by the modal-workflow framework . | train | false |
232 | def _get_overrides_for_ccx(ccx):
overrides_cache = request_cache.get_cache('ccx-overrides')
if (ccx not in overrides_cache):
overrides = {}
query = CcxFieldOverride.objects.filter(ccx=ccx)
for override in query:
block_overrides = overrides.setdefault(override.location, {})
block_overrides[override.field] = json.loads(override.value)
block_overrides[(override.field + '_id')] = override.id
block_overrides[(override.field + '_instance')] = override
overrides_cache[ccx] = overrides
return overrides_cache[ccx]
| [
"def",
"_get_overrides_for_ccx",
"(",
"ccx",
")",
":",
"overrides_cache",
"=",
"request_cache",
".",
"get_cache",
"(",
"'ccx-overrides'",
")",
"if",
"(",
"ccx",
"not",
"in",
"overrides_cache",
")",
":",
"overrides",
"=",
"{",
"}",
"query",
"=",
"CcxFieldOverride",
".",
"objects",
".",
"filter",
"(",
"ccx",
"=",
"ccx",
")",
"for",
"override",
"in",
"query",
":",
"block_overrides",
"=",
"overrides",
".",
"setdefault",
"(",
"override",
".",
"location",
",",
"{",
"}",
")",
"block_overrides",
"[",
"override",
".",
"field",
"]",
"=",
"json",
".",
"loads",
"(",
"override",
".",
"value",
")",
"block_overrides",
"[",
"(",
"override",
".",
"field",
"+",
"'_id'",
")",
"]",
"=",
"override",
".",
"id",
"block_overrides",
"[",
"(",
"override",
".",
"field",
"+",
"'_instance'",
")",
"]",
"=",
"override",
"overrides_cache",
"[",
"ccx",
"]",
"=",
"overrides",
"return",
"overrides_cache",
"[",
"ccx",
"]"
] | returns a dictionary mapping field name to overridden value for any overrides set on this block for this ccx . | train | false |
234 | def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(use_noise, (state_before * trng.binomial(state_before.shape, p=0.5, n=1, dtype=state_before.dtype)), (state_before * 0.5))
return proj
| [
"def",
"dropout_layer",
"(",
"state_before",
",",
"use_noise",
",",
"trng",
")",
":",
"proj",
"=",
"tensor",
".",
"switch",
"(",
"use_noise",
",",
"(",
"state_before",
"*",
"trng",
".",
"binomial",
"(",
"state_before",
".",
"shape",
",",
"p",
"=",
"0.5",
",",
"n",
"=",
"1",
",",
"dtype",
"=",
"state_before",
".",
"dtype",
")",
")",
",",
"(",
"state_before",
"*",
"0.5",
")",
")",
"return",
"proj"
] | apply dropout : tensor switch is like an if statement that checks the value of the theano shared variable use_noise . | train | false |
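A minimal usage sketch in the style of the Theano LSTM tutorial this snippet comes from; the seed and variable names here are illustrative assumptions:

import numpy
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams

trng = MRG_RandomStreams(1234)
use_noise = theano.shared(numpy.float32(1.0))  # 1.0 during training, 0.0 at test time
x = tensor.matrix('x')
y = dropout_layer(x, use_noise, trng)  # random 0/1 mask (p=0.5) when training, 0.5 scaling otherwise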
235 | def _make_transform_graph_docs():
import inspect
from textwrap import dedent
from ...extern import six
from ..baseframe import BaseCoordinateFrame, frame_transform_graph
isclass = inspect.isclass
coosys = [item for item in six.itervalues(globals()) if (isclass(item) and issubclass(item, BaseCoordinateFrame))]
graphstr = frame_transform_graph.to_dot_graph(addnodes=coosys)
docstr = '\n The diagram below shows all of the coordinate systems built into the\n `~astropy.coordinates` package, their aliases (useful for converting\n other coordinates to them using attribute-style access) and the\n pre-defined transformations between them. The user is free to\n override any of these transformations by defining new transformations\n between these systems, but the pre-defined transformations should be\n sufficient for typical usage.\n\n The graph also indicates the priority for each transformation as a\n number next to the arrow. These priorities are used to decide the\n preferred order when two transformation paths have the same number\n of steps. These priorities are defined such that the path with a\n *smaller* total priority is favored.\n\n\n .. graphviz::\n\n '
return ((dedent(docstr) + ' ') + graphstr.replace('\n', '\n '))
| [
"def",
"_make_transform_graph_docs",
"(",
")",
":",
"import",
"inspect",
"from",
"textwrap",
"import",
"dedent",
"from",
"...",
"extern",
"import",
"six",
"from",
".",
".",
"baseframe",
"import",
"BaseCoordinateFrame",
",",
"frame_transform_graph",
"isclass",
"=",
"inspect",
".",
"isclass",
"coosys",
"=",
"[",
"item",
"for",
"item",
"in",
"six",
".",
"itervalues",
"(",
"globals",
"(",
")",
")",
"if",
"(",
"isclass",
"(",
"item",
")",
"and",
"issubclass",
"(",
"item",
",",
"BaseCoordinateFrame",
")",
")",
"]",
"graphstr",
"=",
"frame_transform_graph",
".",
"to_dot_graph",
"(",
"addnodes",
"=",
"coosys",
")",
"docstr",
"=",
"'\\n The diagram below shows all of the coordinate systems built into the\\n `~astropy.coordinates` package, their aliases (useful for converting\\n other coordinates to them using attribute-style access) and the\\n pre-defined transformations between them. The user is free to\\n override any of these transformations by defining new transformations\\n between these systems, but the pre-defined transformations should be\\n sufficient for typical usage.\\n\\n The graph also indicates the priority for each transformation as a\\n number next to the arrow. These priorities are used to decide the\\n preferred order when two transformation paths have the same number\\n of steps. These priorities are defined such that the path with a\\n *smaller* total priority is favored.\\n\\n\\n .. graphviz::\\n\\n '",
"return",
"(",
"(",
"dedent",
"(",
"docstr",
")",
"+",
"' '",
")",
"+",
"graphstr",
".",
"replace",
"(",
"'\\n'",
",",
"'\\n '",
")",
")"
] | generates a string for use with the coordinate package's docstring to show the available transforms and coordinate systems . | train | false |
236 | def subtract_modulo(image1, image2):
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
| [
"def",
"subtract_modulo",
"(",
"image1",
",",
"image2",
")",
":",
"image1",
".",
"load",
"(",
")",
"image2",
".",
"load",
"(",
")",
"return",
"image1",
".",
"_new",
"(",
"image1",
".",
"im",
".",
"chop_subtract_modulo",
"(",
"image2",
".",
"im",
")",
")"
] | subtract two images . | train | false |
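A minimal sketch, assuming the standard Pillow distribution where this helper is exposed as ImageChops.subtract_modulo; unlike plain subtraction, the result wraps modulo 256 instead of clipping at 0:

from PIL import Image, ImageChops

im1 = Image.new('L', (2, 2), 10)   # every pixel 10
im2 = Image.new('L', (2, 2), 30)   # every pixel 30
out = ImageChops.subtract_modulo(im1, im2)
out.getpixel((0, 0))               # 236, i.e. (10 - 30) % 256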
237 | def isLargeSameDirection(inset, loop, radius):
if (euclidean.isWiddershins(inset) != euclidean.isWiddershins(loop)):
return False
return (euclidean.getMaximumSpan(inset) > (2.01 * abs(radius)))
| [
"def",
"isLargeSameDirection",
"(",
"inset",
",",
"loop",
",",
"radius",
")",
":",
"if",
"(",
"euclidean",
".",
"isWiddershins",
"(",
"inset",
")",
"!=",
"euclidean",
".",
"isWiddershins",
"(",
"loop",
")",
")",
":",
"return",
"False",
"return",
"(",
"euclidean",
".",
"getMaximumSpan",
"(",
"inset",
")",
">",
"(",
"2.01",
"*",
"abs",
"(",
"radius",
")",
")",
")"
] | determine if the inset is in the same direction as the loop and whether it is large enough . | train | false |
238 | def CreateDefaultGUI(appClass=None):
if (appClass is None):
import intpyapp
appClass = intpyapp.InteractivePythonApp
appClass().InitInstance()
| [
"def",
"CreateDefaultGUI",
"(",
"appClass",
"=",
"None",
")",
":",
"if",
"(",
"appClass",
"is",
"None",
")",
":",
"import",
"intpyapp",
"appClass",
"=",
"intpyapp",
".",
"InteractivePythonApp",
"appClass",
"(",
")",
".",
"InitInstance",
"(",
")"
] | creates a default gui environment . | train | false |
239 | def bool_(val):
if (isinstance(val, six.string_types) and (val.lower() == 'false')):
return False
return bool(val)
| [
"def",
"bool_",
"(",
"val",
")",
":",
"if",
"(",
"isinstance",
"(",
"val",
",",
"six",
".",
"string_types",
")",
"and",
"(",
"val",
".",
"lower",
"(",
")",
"==",
"'false'",
")",
")",
":",
"return",
"False",
"return",
"bool",
"(",
"val",
")"
] | like the builtin bool , but returns false if the value is the string 'false' . | train | false |
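Behavior sketch, derivable from the code above:

bool_('false')  # False -- the one special-cased string, case-insensitive
bool_('FALSE')  # False
bool_('0')      # True  -- any other non-empty string is truthy
bool_(0)        # False -- non-strings fall through to bool()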
240 | def build_bond(iface, **settings):
rh_major = __grains__['osrelease'][:1]
opts = _parse_settings_bond(settings, iface)
try:
template = JINJA.get_template('conf.jinja')
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template conf.jinja')
return ''
data = template.render({'name': iface, 'bonding': opts})
_write_file_iface(iface, data, _RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
path = os.path.join(_RH_NETWORK_CONF_FILES, '{0}.conf'.format(iface))
if (rh_major == '5'):
__salt__['cmd.run']('sed -i -e "/^alias\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False)
__salt__['cmd.run']('sed -i -e "/^options\\s{0}.*/d" /etc/modprobe.conf'.format(iface), python_shell=False)
__salt__['file.append']('/etc/modprobe.conf', path)
__salt__['kmod.load']('bonding')
if settings['test']:
return _read_temp(data)
return _read_file(path)
| [
"def",
"build_bond",
"(",
"iface",
",",
"**",
"settings",
")",
":",
"rh_major",
"=",
"__grains__",
"[",
"'osrelease'",
"]",
"[",
":",
"1",
"]",
"opts",
"=",
"_parse_settings_bond",
"(",
"settings",
",",
"iface",
")",
"try",
":",
"template",
"=",
"JINJA",
".",
"get_template",
"(",
"'conf.jinja'",
")",
"except",
"jinja2",
".",
"exceptions",
".",
"TemplateNotFound",
":",
"log",
".",
"error",
"(",
"'Could not load template conf.jinja'",
")",
"return",
"''",
"data",
"=",
"template",
".",
"render",
"(",
"{",
"'name'",
":",
"iface",
",",
"'bonding'",
":",
"opts",
"}",
")",
"_write_file_iface",
"(",
"iface",
",",
"data",
",",
"_RH_NETWORK_CONF_FILES",
",",
"'{0}.conf'",
".",
"format",
"(",
"iface",
")",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"_RH_NETWORK_CONF_FILES",
",",
"'{0}.conf'",
".",
"format",
"(",
"iface",
")",
")",
"if",
"(",
"rh_major",
"==",
"'5'",
")",
":",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'sed -i -e \"/^alias\\\\s{0}.*/d\" /etc/modprobe.conf'",
".",
"format",
"(",
"iface",
")",
",",
"python_shell",
"=",
"False",
")",
"__salt__",
"[",
"'cmd.run'",
"]",
"(",
"'sed -i -e \"/^options\\\\s{0}.*/d\" /etc/modprobe.conf'",
".",
"format",
"(",
"iface",
")",
",",
"python_shell",
"=",
"False",
")",
"__salt__",
"[",
"'file.append'",
"]",
"(",
"'/etc/modprobe.conf'",
",",
"path",
")",
"__salt__",
"[",
"'kmod.load'",
"]",
"(",
"'bonding'",
")",
"if",
"settings",
"[",
"'test'",
"]",
":",
"return",
"_read_temp",
"(",
"data",
")",
"return",
"_read_file",
"(",
"path",
")"
] | create a bond script in /etc/modprobe . | train | true |
241 | def get_unique_variable(name):
candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
if (not candidates):
raise ValueError(('Couldnt find variable %s' % name))
for candidate in candidates:
if (candidate.op.name == name):
return candidate
raise ValueError('Variable %s does not uniquely identify a variable', name)
| [
"def",
"get_unique_variable",
"(",
"name",
")",
":",
"candidates",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"GLOBAL_VARIABLES",
",",
"name",
")",
"if",
"(",
"not",
"candidates",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Couldnt find variable %s'",
"%",
"name",
")",
")",
"for",
"candidate",
"in",
"candidates",
":",
"if",
"(",
"candidate",
".",
"op",
".",
"name",
"==",
"name",
")",
":",
"return",
"candidate",
"raise",
"ValueError",
"(",
"'Variable %s does not uniquely identify a variable'",
",",
"name",
")"
] | gets the variable uniquely identified by that name . | train | true |
242 | def _hexify(data, chunksize=None):
if (chunksize is None):
chunksize = _hex_chunksize
hex = data.encode('hex_codec')
l = len(hex)
if (l > chunksize):
chunks = []
i = 0
while (i < l):
chunks.append(hex[i:(i + chunksize)])
i += chunksize
hex = ' '.join(chunks)
return hex
| [
"def",
"_hexify",
"(",
"data",
",",
"chunksize",
"=",
"None",
")",
":",
"if",
"(",
"chunksize",
"is",
"None",
")",
":",
"chunksize",
"=",
"_hex_chunksize",
"hex",
"=",
"data",
".",
"encode",
"(",
"'hex_codec'",
")",
"l",
"=",
"len",
"(",
"hex",
")",
"if",
"(",
"l",
">",
"chunksize",
")",
":",
"chunks",
"=",
"[",
"]",
"i",
"=",
"0",
"while",
"(",
"i",
"<",
"l",
")",
":",
"chunks",
".",
"append",
"(",
"hex",
"[",
"i",
":",
"(",
"i",
"+",
"chunksize",
")",
"]",
")",
"i",
"+=",
"chunksize",
"hex",
"=",
"' '",
".",
"join",
"(",
"chunks",
")",
"return",
"hex"
] | convert a binary string into its hex encoding . | train | true |
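A usage sketch; note the snippet is Python 2 (str.encode('hex_codec') no longer exists in Python 3), and the first call assumes the module-level _hex_chunksize default exceeds six characters:

_hexify('\x01\x02\x03')            # '010203'
_hexify('\x01' * 6, chunksize=4)   # '0101 0101 0101' -- hex split into 4-char chunks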
243 | def test_hsl_to_rgb_part_12():
assert (hsl_to_rgb(300, 20, 50) == (153, 102, 153))
assert (hsl_to_rgb(300, 60, 50) == (204, 51, 204))
assert (hsl_to_rgb(300, 100, 50) == (255, 0, 255))
| [
"def",
"test_hsl_to_rgb_part_12",
"(",
")",
":",
"assert",
"(",
"hsl_to_rgb",
"(",
"300",
",",
"20",
",",
"50",
")",
"==",
"(",
"153",
",",
"102",
",",
"153",
")",
")",
"assert",
"(",
"hsl_to_rgb",
"(",
"300",
",",
"60",
",",
"50",
")",
"==",
"(",
"204",
",",
"51",
",",
"204",
")",
")",
"assert",
"(",
"hsl_to_rgb",
"(",
"300",
",",
"100",
",",
"50",
")",
"==",
"(",
"255",
",",
"0",
",",
"255",
")",
")"
] | test hsl to rgb color function . | train | false |
244 | def get_data(datastore, path):
client = _get_client()
return client.get_datastore_data(datastore, path)
| [
"def",
"get_data",
"(",
"datastore",
",",
"path",
")",
":",
"client",
"=",
"_get_client",
"(",
")",
"return",
"client",
".",
"get_datastore_data",
"(",
"datastore",
",",
"path",
")"
] | return the data at the given path from the named datastore using the client . | train | false |
245 | def insert_hyphens(node, hyphenator):
textattrs = (u'text', u'tail')
if isinstance(node, lxml.etree._Entity):
textattrs = (u'tail',)
for attr in textattrs:
text = getattr(node, attr)
if (not text):
continue
new_data = u' '.join([hyphenator.inserted(w, hyphen=u'\xad') for w in text.split(u' ')])
if text[0].isspace():
new_data = (u' ' + new_data)
if text[(-1)].isspace():
new_data += u' '
setattr(node, attr, new_data)
for child in node.iterchildren():
insert_hyphens(child, hyphenator)
| [
"def",
"insert_hyphens",
"(",
"node",
",",
"hyphenator",
")",
":",
"textattrs",
"=",
"(",
"u'text'",
",",
"u'tail'",
")",
"if",
"isinstance",
"(",
"node",
",",
"lxml",
".",
"etree",
".",
"_Entity",
")",
":",
"textattrs",
"=",
"(",
"u'tail'",
",",
")",
"for",
"attr",
"in",
"textattrs",
":",
"text",
"=",
"getattr",
"(",
"node",
",",
"attr",
")",
"if",
"(",
"not",
"text",
")",
":",
"continue",
"new_data",
"=",
"u' '",
".",
"join",
"(",
"[",
"hyphenator",
".",
"inserted",
"(",
"w",
",",
"hyphen",
"=",
"u'\\xad'",
")",
"for",
"w",
"in",
"text",
".",
"split",
"(",
"u' '",
")",
"]",
")",
"if",
"text",
"[",
"0",
"]",
".",
"isspace",
"(",
")",
":",
"new_data",
"=",
"(",
"u' '",
"+",
"new_data",
")",
"if",
"text",
"[",
"(",
"-",
"1",
")",
"]",
".",
"isspace",
"(",
")",
":",
"new_data",
"+=",
"u' '",
"setattr",
"(",
"node",
",",
"attr",
",",
"new_data",
")",
"for",
"child",
"in",
"node",
".",
"iterchildren",
"(",
")",
":",
"insert_hyphens",
"(",
"child",
",",
"hyphenator",
")"
] | insert hyphens into a node . | train | false |
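A sketch assuming the Pyphen library for the hyphenator argument; any object with an inserted(word, hyphen=...) method would do:

import lxml.html
import pyphen

doc = lxml.html.fromstring(u'<p>hyphenation example</p>')
insert_hyphens(doc, pyphen.Pyphen(lang='en_US'))
# text and tail strings now carry soft hyphens (U+00AD) at legal break points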
246 | def getTopPaths(paths):
top = (-999999999.9)
for path in paths:
for point in path:
top = max(top, point.z)
return top
| [
"def",
"getTopPaths",
"(",
"paths",
")",
":",
"top",
"=",
"(",
"-",
"999999999.9",
")",
"for",
"path",
"in",
"paths",
":",
"for",
"point",
"in",
"path",
":",
"top",
"=",
"max",
"(",
"top",
",",
"point",
".",
"z",
")",
"return",
"top"
] | get the top of the paths . | train | false |
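A minimal sketch with a hypothetical stand-in point class; in the original code the points are Vector3 objects carrying a z attribute:

class Point(object):
    def __init__(self, z):
        self.z = z

paths = [[Point(1.0), Point(4.5)], [Point(3.2)]]
getTopPaths(paths)  # 4.5 -- the maximum z over every point in every path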
247 | def normalize_dictionary(data_dict):
for (key, value) in data_dict.items():
if (not isinstance(key, str)):
del data_dict[key]
data_dict[str(key)] = value
return data_dict
| [
"def",
"normalize_dictionary",
"(",
"data_dict",
")",
":",
"for",
"(",
"key",
",",
"value",
")",
"in",
"data_dict",
".",
"items",
"(",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"key",
",",
"str",
")",
")",
":",
"del",
"data_dict",
"[",
"key",
"]",
"data_dict",
"[",
"str",
"(",
"key",
")",
"]",
"=",
"value",
"return",
"data_dict"
] | converts all the keys in "data_dict" to strings . | train | true |
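Usage sketch; note this is Python 2 style, where items() returns a list snapshot so deleting keys mid-loop is safe (under Python 3, mutating the dict while iterating a live items() view is unsafe):

normalize_dictionary({1: 'one', 'two': 2})  # {'1': 'one', 'two': 2}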
248 | @register.filter
def display_url(url):
url = force_bytes(url, errors='replace')
return urllib.unquote(url).decode('utf-8', errors='replace')
| [
"@",
"register",
".",
"filter",
"def",
"display_url",
"(",
"url",
")",
":",
"url",
"=",
"force_bytes",
"(",
"url",
",",
"errors",
"=",
"'replace'",
")",
"return",
"urllib",
".",
"unquote",
"(",
"url",
")",
".",
"decode",
"(",
"'utf-8'",
",",
"errors",
"=",
"'replace'",
")"
] | display a url like the browser url bar would . | train | false |
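An illustrative call, assuming the percent-encoded input is UTF-8 as the decode step expects (Python 2, matching the urllib.unquote usage):

display_url('http://en.wikipedia.org/wiki/%E2%9C%93')
# u'http://en.wikipedia.org/wiki/\u2713' -- percent-escapes shown as the characters they encode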
250 | def get_file_json(path):
with open(path, u'r') as f:
return json.load(f)
| [
"def",
"get_file_json",
"(",
"path",
")",
":",
"with",
"open",
"(",
"path",
",",
"u'r'",
")",
"as",
"f",
":",
"return",
"json",
".",
"load",
"(",
"f",
")"
] | read a file and return parsed json object . | train | false |
251 | def ancestry(path):
out = []
last_path = None
while path:
path = os.path.dirname(path)
if (path == last_path):
break
last_path = path
if path:
out.insert(0, path)
return out
| [
"def",
"ancestry",
"(",
"path",
")",
":",
"out",
"=",
"[",
"]",
"last_path",
"=",
"None",
"while",
"path",
":",
"path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"(",
"path",
"==",
"last_path",
")",
":",
"break",
"last_path",
"=",
"path",
"if",
"path",
":",
"out",
".",
"insert",
"(",
"0",
",",
"path",
")",
"return",
"out"
] | return a list consisting of the path's parent directory , its grandparent , and so on . | train | false |
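Behavior sketch, traced from the loop above:

ancestry('/a/b/c')  # ['/', '/a', '/a/b']
ancestry('a/b/c')   # ['a', 'a/b'] -- stops once dirname() returns ''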
252 | def _ClassifyInclude(fileinfo, include, is_system):
is_cpp_h = (include in _CPP_HEADERS)
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
(target_dir, target_base) = os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))
(include_dir, include_base) = os.path.split(_DropCommonSuffixes(include))
if ((target_base == include_base) and ((include_dir == target_dir) or (include_dir == os.path.normpath((target_dir + '/../public'))))):
return _LIKELY_MY_HEADER
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and (target_first_component.group(0) == include_first_component.group(0))):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
| [
"def",
"_ClassifyInclude",
"(",
"fileinfo",
",",
"include",
",",
"is_system",
")",
":",
"is_cpp_h",
"=",
"(",
"include",
"in",
"_CPP_HEADERS",
")",
"if",
"is_system",
":",
"if",
"is_cpp_h",
":",
"return",
"_CPP_SYS_HEADER",
"else",
":",
"return",
"_C_SYS_HEADER",
"(",
"target_dir",
",",
"target_base",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"_DropCommonSuffixes",
"(",
"fileinfo",
".",
"RepositoryName",
"(",
")",
")",
")",
"(",
"include_dir",
",",
"include_base",
")",
"=",
"os",
".",
"path",
".",
"split",
"(",
"_DropCommonSuffixes",
"(",
"include",
")",
")",
"if",
"(",
"(",
"target_base",
"==",
"include_base",
")",
"and",
"(",
"(",
"include_dir",
"==",
"target_dir",
")",
"or",
"(",
"include_dir",
"==",
"os",
".",
"path",
".",
"normpath",
"(",
"(",
"target_dir",
"+",
"'/../public'",
")",
")",
")",
")",
")",
":",
"return",
"_LIKELY_MY_HEADER",
"target_first_component",
"=",
"_RE_FIRST_COMPONENT",
".",
"match",
"(",
"target_base",
")",
"include_first_component",
"=",
"_RE_FIRST_COMPONENT",
".",
"match",
"(",
"include_base",
")",
"if",
"(",
"target_first_component",
"and",
"include_first_component",
"and",
"(",
"target_first_component",
".",
"group",
"(",
"0",
")",
"==",
"include_first_component",
".",
"group",
"(",
"0",
")",
")",
")",
":",
"return",
"_POSSIBLE_MY_HEADER",
"return",
"_OTHER_HEADER"
] | figures out what kind of header the included file is . | train | true |
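Illustrative classifications, mirroring cpplint's own doctest examples (FileInfo is cpplint's wrapper around the file being linted):

_ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)     # _C_SYS_HEADER
_ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)      # _CPP_SYS_HEADER
_ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)  # _LIKELY_MY_HEADER
_ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)  # _OTHER_HEADER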
253 | def find_dynamicsymbols(expression, exclude=None):
t_set = {dynamicsymbols._t}
if exclude:
if iterable(exclude):
exclude_set = set(exclude)
else:
raise TypeError('exclude kwarg must be iterable')
else:
exclude_set = set()
return (set([i for i in expression.atoms(AppliedUndef, Derivative) if (i.free_symbols == t_set)]) - exclude_set)
| [
"def",
"find_dynamicsymbols",
"(",
"expression",
",",
"exclude",
"=",
"None",
")",
":",
"t_set",
"=",
"{",
"dynamicsymbols",
".",
"_t",
"}",
"if",
"exclude",
":",
"if",
"iterable",
"(",
"exclude",
")",
":",
"exclude_set",
"=",
"set",
"(",
"exclude",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'exclude kwarg must be iterable'",
")",
"else",
":",
"exclude_set",
"=",
"set",
"(",
")",
"return",
"(",
"set",
"(",
"[",
"i",
"for",
"i",
"in",
"expression",
".",
"atoms",
"(",
"AppliedUndef",
",",
"Derivative",
")",
"if",
"(",
"i",
".",
"free_symbols",
"==",
"t_set",
")",
"]",
")",
"-",
"exclude_set",
")"
] | find all dynamicsymbols in expression . | train | false |
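A usage sketch with SymPy's mechanics module, where this helper lives:

from sympy.physics.mechanics import dynamicsymbols, find_dynamicsymbols

x, y = dynamicsymbols('x y')
find_dynamicsymbols(x + y)               # {x(t), y(t)}
find_dynamicsymbols(x + y, exclude=[y])  # {x(t)}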
254 | def in6_ismlladdr(str):
return in6_isincluded(str, 'ff02::', 16)
| [
"def",
"in6_ismlladdr",
"(",
"str",
")",
":",
"return",
"in6_isincluded",
"(",
"str",
",",
"'ff02::'",
",",
"16",
")"
] | returns true if address belongs to link-local multicast address space . | train | false |
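Examples, grounded in the ff02::/16 prefix test above:

in6_ismlladdr('ff02::1')  # True  -- all-nodes link-local multicast
in6_ismlladdr('ff05::2')  # False -- site-local multicast scope
in6_ismlladdr('fe80::1')  # False -- link-local unicast, not multicast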
255 | def _create_ofb_cipher(factory, **kwargs):
cipher_state = factory._create_base_cipher(kwargs)
iv = kwargs.pop('IV', None)
IV = kwargs.pop('iv', None)
if ((None, None) == (iv, IV)):
iv = get_random_bytes(factory.block_size)
if (iv is not None):
if (IV is not None):
raise TypeError("You must either use 'iv' or 'IV', not both")
else:
iv = IV
if kwargs:
raise TypeError(('Unknown parameters for OFB: %s' % str(kwargs)))
return OfbMode(cipher_state, iv)
| [
"def",
"_create_ofb_cipher",
"(",
"factory",
",",
"**",
"kwargs",
")",
":",
"cipher_state",
"=",
"factory",
".",
"_create_base_cipher",
"(",
"kwargs",
")",
"iv",
"=",
"kwargs",
".",
"pop",
"(",
"'IV'",
",",
"None",
")",
"IV",
"=",
"kwargs",
".",
"pop",
"(",
"'iv'",
",",
"None",
")",
"if",
"(",
"(",
"None",
",",
"None",
")",
"==",
"(",
"iv",
",",
"IV",
")",
")",
":",
"iv",
"=",
"get_random_bytes",
"(",
"factory",
".",
"block_size",
")",
"if",
"(",
"iv",
"is",
"not",
"None",
")",
":",
"if",
"(",
"IV",
"is",
"not",
"None",
")",
":",
"raise",
"TypeError",
"(",
"\"You must either use 'iv' or 'IV', not both\"",
")",
"else",
":",
"iv",
"=",
"IV",
"if",
"kwargs",
":",
"raise",
"TypeError",
"(",
"(",
"'Unknown parameters for OFB: %s'",
"%",
"str",
"(",
"kwargs",
")",
")",
")",
"return",
"OfbMode",
"(",
"cipher_state",
",",
"iv",
")"
] | instantiate a cipher object that performs ofb encryption/decryption . | train | false |
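User code never calls this factory directly; it is reached through a cipher's new() dispatch. A sketch assuming PyCryptodome, where this helper lives; omitting the IV would let the factory generate a random one, while passing both 'iv' and 'IV' raises TypeError:

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(16)
iv = get_random_bytes(16)
cipher = AES.new(key, AES.MODE_OFB, iv=iv)  # routed to _create_ofb_cipher
ct = cipher.encrypt(b'attack at dawn!!')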
256 | def init(mpstate):
return SerialModule(mpstate)
| [
"def",
"init",
"(",
"mpstate",
")",
":",
"return",
"SerialModule",
"(",
"mpstate",
")"
] | initialise module . | train | false |
257 | def unpack_callbacks(cbs):
if cbs:
return [[i for i in f if i] for f in zip(*cbs)]
else:
return [(), (), (), (), ()]
| [
"def",
"unpack_callbacks",
"(",
"cbs",
")",
":",
"if",
"cbs",
":",
"return",
"[",
"[",
"i",
"for",
"i",
"in",
"f",
"if",
"i",
"]",
"for",
"f",
"in",
"zip",
"(",
"*",
"cbs",
")",
"]",
"else",
":",
"return",
"[",
"(",
")",
",",
"(",
")",
",",
"(",
")",
",",
"(",
")",
",",
"(",
")",
"]"
] | take an iterable of callback tuples and return one list of callbacks per hook position . | train | false |
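A sketch of the transposition with placeholder callables; in Dask each callback is a 5-tuple of hooks, and None marks an unused slot:

def f(*args): pass
def g(*args): pass

cbs = [(f, None, f, None, None),
       (g, g, None, None, None)]
unpack_callbacks(cbs)  # [[f, g], [g], [f], [], []] -- one list per hook position, Nones dropped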
258 | def recreate_field(unbound):
if (not isinstance(unbound, UnboundField)):
raise ValueError(('recreate_field expects UnboundField instance, %s was passed.' % type(unbound)))
return unbound.field_class(*unbound.args, **unbound.kwargs)
| [
"def",
"recreate_field",
"(",
"unbound",
")",
":",
"if",
"(",
"not",
"isinstance",
"(",
"unbound",
",",
"UnboundField",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'recreate_field expects UnboundField instance, %s was passed.'",
"%",
"type",
"(",
"unbound",
")",
")",
")",
"return",
"unbound",
".",
"field_class",
"(",
"*",
"unbound",
".",
"args",
",",
"**",
"unbound",
".",
"kwargs",
")"
] | create new instance of the unbound field . | train | false |
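A sketch assuming WTForms, where declaring a field outside a Form class yields an UnboundField:

from wtforms.fields import StringField
from wtforms.validators import DataRequired

unbound = StringField('Name', validators=[DataRequired()])  # an UnboundField
clone = recreate_field(unbound)  # fresh UnboundField built from the same args/kwargs
recreate_field('not a field')    # raises ValueError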
259 | def test_attribute_access():
can_compile(u'(. foo bar baz)')
can_compile(u'(. foo [bar] baz)')
can_compile(u'(. foo bar [baz] [0] quux [frob])')
can_compile(u'(. foo bar [(+ 1 2 3 4)] quux [frob])')
cant_compile(u'(. foo bar :baz [0] quux [frob])')
cant_compile(u'(. foo bar baz (0) quux [frob])')
cant_compile(u'(. foo bar baz [0] quux {frob})')
| [
"def",
"test_attribute_access",
"(",
")",
":",
"can_compile",
"(",
"u'(. foo bar baz)'",
")",
"can_compile",
"(",
"u'(. foo [bar] baz)'",
")",
"can_compile",
"(",
"u'(. foo bar [baz] [0] quux [frob])'",
")",
"can_compile",
"(",
"u'(. foo bar [(+ 1 2 3 4)] quux [frob])'",
")",
"cant_compile",
"(",
"u'(. foo bar :baz [0] quux [frob])'",
")",
"cant_compile",
"(",
"u'(. foo bar baz (0) quux [frob])'",
")",
"cant_compile",
"(",
"u'(. foo bar baz [0] quux {frob})'",
")"
] | ensure attribute access compiles correctly . | train | false |
260 | def precedence(state):
try:
return PRECEDENCE_LOOKUP[state]
except KeyError:
return NONE_PRECEDENCE
| [
"def",
"precedence",
"(",
"state",
")",
":",
"try",
":",
"return",
"PRECEDENCE_LOOKUP",
"[",
"state",
"]",
"except",
"KeyError",
":",
"return",
"NONE_PRECEDENCE"
] | get the precedence index for state . | train | false |