Dataset schema:

    id_within_dataset     int64     values 1 to 55.5k
    snippet               string    lengths 19 to 14.2k
    tokens                sequence
    nl                    string    lengths 6 to 352
    split_within_dataset  string    1 distinct value
    is_duplicated         bool      2 classes

Each record below lists, in order: id_within_dataset, snippet, tokens, nl, split_within_dataset, is_duplicated.
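As a minimal sketch of how a dump like this can be consumed, assuming the records have been exported as JSON Lines to a file named snippets.jsonl (the file name and the export format are assumptions, not part of the dataset):

import json

# Minimal sketch: iterate over records exported as JSON Lines, one JSON
# object per row with the six fields listed in the schema above. The
# path 'snippets.jsonl' is a hypothetical placeholder.
with open('snippets.jsonl') as f:
    for line in f:
        row = json.loads(line)
        # 'snippet' holds the raw source, 'tokens' its tokenization,
        # and 'nl' a one-sentence summary of what the function does.
        if not row['is_duplicated']:
            print(row['id_within_dataset'], row['nl'])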
1
def install_translator(qapp):
    global QT_TRANSLATOR
    if (QT_TRANSLATOR is None):
        qt_translator = QTranslator()
        if qt_translator.load(('qt_' + QLocale.system().name()), QLibraryInfo.location(QLibraryInfo.TranslationsPath)):
            QT_TRANSLATOR = qt_translator
    if (QT_TRANSLATOR is not None):
        qapp.installTranslator(QT_TRANSLATOR)
[ "def", "install_translator", "(", "qapp", ")", ":", "global", "QT_TRANSLATOR", "if", "(", "QT_TRANSLATOR", "is", "None", ")", ":", "qt_translator", "=", "QTranslator", "(", ")", "if", "qt_translator", ".", "load", "(", "(", "'qt_'", "+", "QLocale", ".", "system", "(", ")", ".", "name", "(", ")", ")", ",", "QLibraryInfo", ".", "location", "(", "QLibraryInfo", ".", "TranslationsPath", ")", ")", ":", "QT_TRANSLATOR", "=", "qt_translator", "if", "(", "QT_TRANSLATOR", "is", "not", "None", ")", ":", "qapp", ".", "installTranslator", "(", "QT_TRANSLATOR", ")" ]
install qt translator to the qapplication instance .
train
true
2
def delete_dhcp_options(dhcp_options_id=None, dhcp_options_name=None, region=None, key=None, keyid=None, profile=None):
    return _delete_resource(resource='dhcp_options', name=dhcp_options_name, resource_id=dhcp_options_id, region=region, key=key, keyid=keyid, profile=profile)
[ "def", "delete_dhcp_options", "(", "dhcp_options_id", "=", "None", ",", "dhcp_options_name", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "return", "_delete_resource", "(", "resource", "=", "'dhcp_options'", ",", "name", "=", "dhcp_options_name", ",", "resource_id", "=", "dhcp_options_id", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")" ]
delete dhcp options by id or name .
train
true
4
def test_depth_first_mro():
    class A(object, ): pass
    class B(A, ): pass
    class C(A, ): pass
    class D(B, C, ): pass
    class E(D, object, ): pass
    class G(object, ): pass
    class H(G, ): pass
    class I(G, ): pass
    class K(H, I, object, ): pass
    class L(K, E, ): pass
    AreEqual(L.__mro__, (L, K, H, I, G, E, D, B, C, A, object))
[ "def", "test_depth_first_mro", "(", ")", ":", "class", "A", "(", "object", ",", ")", ":", "pass", "class", "B", "(", "A", ",", ")", ":", "pass", "class", "C", "(", "A", ",", ")", ":", "pass", "class", "D", "(", "B", ",", "C", ",", ")", ":", "pass", "class", "E", "(", "D", ",", "object", ",", ")", ":", "pass", "class", "G", "(", "object", ",", ")", ":", "pass", "class", "H", "(", "G", ",", ")", ":", "pass", "class", "I", "(", "G", ",", ")", ":", "pass", "class", "K", "(", "H", ",", "I", ",", "object", ",", ")", ":", "pass", "class", "L", "(", "K", ",", "E", ",", ")", ":", "pass", "AreEqual", "(", "L", ".", "__mro__", ",", "(", "L", ",", "K", ",", "H", ",", "I", ",", "G", ",", "E", ",", "D", ",", "B", ",", "C", ",", "A", ",", "object", ")", ")" ]
w/o old-style .
train
false
5
def mapping(data_source, geom_name='geom', layer_key=0, multi_geom=False):
    if isinstance(data_source, six.string_types):
        data_source = DataSource(data_source)
    elif isinstance(data_source, DataSource):
        pass
    else:
        raise TypeError('Data source parameter must be a string or a DataSource object.')
    _mapping = {}
    for field in data_source[layer_key].fields:
        mfield = field.lower()
        if (mfield[(-1):] == '_'):
            mfield += 'field'
        _mapping[mfield] = field
    gtype = data_source[layer_key].geom_type
    if (multi_geom and (gtype.num in (1, 2, 3))):
        prefix = 'MULTI'
    else:
        prefix = ''
    _mapping[geom_name] = (prefix + str(gtype).upper())
    return _mapping
[ "def", "mapping", "(", "data_source", ",", "geom_name", "=", "'geom'", ",", "layer_key", "=", "0", ",", "multi_geom", "=", "False", ")", ":", "if", "isinstance", "(", "data_source", ",", "six", ".", "string_types", ")", ":", "data_source", "=", "DataSource", "(", "data_source", ")", "elif", "isinstance", "(", "data_source", ",", "DataSource", ")", ":", "pass", "else", ":", "raise", "TypeError", "(", "'Data source parameter must be a string or a DataSource object.'", ")", "_mapping", "=", "{", "}", "for", "field", "in", "data_source", "[", "layer_key", "]", ".", "fields", ":", "mfield", "=", "field", ".", "lower", "(", ")", "if", "(", "mfield", "[", "(", "-", "1", ")", ":", "]", "==", "'_'", ")", ":", "mfield", "+=", "'field'", "_mapping", "[", "mfield", "]", "=", "field", "gtype", "=", "data_source", "[", "layer_key", "]", ".", "geom_type", "if", "(", "multi_geom", "and", "(", "gtype", ".", "num", "in", "(", "1", ",", "2", ",", "3", ")", ")", ")", ":", "prefix", "=", "'MULTI'", "else", ":", "prefix", "=", "''", "_mapping", "[", "geom_name", "]", "=", "(", "prefix", "+", "str", "(", "gtype", ")", ".", "upper", "(", ")", ")", "return", "_mapping" ]
given a datasource .
train
false
7
def test_pprint_npfloat32():
    dat = np.array([1.0, 2.0], dtype=np.float32)
    t = Table([dat], names=['a'])
    t['a'].format = '5.2f'
    assert (str(t['a']) == ' a \n-----\n 1.00\n 2.00')
[ "def", "test_pprint_npfloat32", "(", ")", ":", "dat", "=", "np", ".", "array", "(", "[", "1.0", ",", "2.0", "]", ",", "dtype", "=", "np", ".", "float32", ")", "t", "=", "Table", "(", "[", "dat", "]", ",", "names", "=", "[", "'a'", "]", ")", "t", "[", "'a'", "]", ".", "format", "=", "'5.2f'", "assert", "(", "str", "(", "t", "[", "'a'", "]", ")", "==", "' a \\n-----\\n 1.00\\n 2.00'", ")" ]
test for #148 .
train
false
8
def test_interpolation():
    (t0, k0) = (0, np.array([5.0]))
    results = _compute_fixed_length_solns(model, t0, k0)
    for (integrator, numeric_solution) in results.items():
        (N, T) = (1000, numeric_solution[:, 0][(-1)])
        ti = np.linspace(t0, T, N)
        interp_solution = model.interpolate(numeric_solution, ti, k=3, ext=2)
        analytic_solution = solow_analytic_solution(ti, k0, *valid_params)
        np.testing.assert_allclose(interp_solution, analytic_solution)
[ "def", "test_interpolation", "(", ")", ":", "(", "t0", ",", "k0", ")", "=", "(", "0", ",", "np", ".", "array", "(", "[", "5.0", "]", ")", ")", "results", "=", "_compute_fixed_length_solns", "(", "model", ",", "t0", ",", "k0", ")", "for", "(", "integrator", ",", "numeric_solution", ")", "in", "results", ".", "items", "(", ")", ":", "(", "N", ",", "T", ")", "=", "(", "1000", ",", "numeric_solution", "[", ":", ",", "0", "]", "[", "(", "-", "1", ")", "]", ")", "ti", "=", "np", ".", "linspace", "(", "t0", ",", "T", ",", "N", ")", "interp_solution", "=", "model", ".", "interpolate", "(", "numeric_solution", ",", "ti", ",", "k", "=", "3", ",", "ext", "=", "2", ")", "analytic_solution", "=", "solow_analytic_solution", "(", "ti", ",", "k0", ",", "*", "valid_params", ")", "np", ".", "testing", ".", "assert_allclose", "(", "interp_solution", ",", "analytic_solution", ")" ]
test interpolation option .
train
false
9
def _save_and_remove_module(name, orig_modules):
    if (name not in sys.modules):
        __import__(name)
        del sys.modules[name]
    for modname in list(sys.modules):
        if ((modname == name) or modname.startswith((name + '.'))):
            orig_modules[modname] = sys.modules[modname]
            del sys.modules[modname]
[ "def", "_save_and_remove_module", "(", "name", ",", "orig_modules", ")", ":", "if", "(", "name", "not", "in", "sys", ".", "modules", ")", ":", "__import__", "(", "name", ")", "del", "sys", ".", "modules", "[", "name", "]", "for", "modname", "in", "list", "(", "sys", ".", "modules", ")", ":", "if", "(", "(", "modname", "==", "name", ")", "or", "modname", ".", "startswith", "(", "(", "name", "+", "'.'", ")", ")", ")", ":", "orig_modules", "[", "modname", "]", "=", "sys", ".", "modules", "[", "modname", "]", "del", "sys", ".", "modules", "[", "modname", "]" ]
helper function to save and remove a module from sys .
train
false
12
def url2ip(url):
    iport = urlsplit(url)[1].split(':')
    if (len(iport) > 1):
        return (gethostbyname(iport[0]), iport[1])
    return gethostbyname(iport[0])
[ "def", "url2ip", "(", "url", ")", ":", "iport", "=", "urlsplit", "(", "url", ")", "[", "1", "]", ".", "split", "(", "':'", ")", "if", "(", "len", "(", "iport", ")", ">", "1", ")", ":", "return", "(", "gethostbyname", "(", "iport", "[", "0", "]", ")", ",", "iport", "[", "1", "]", ")", "return", "gethostbyname", "(", "iport", "[", "0", "]", ")" ]
works like turning URL => 180 .
train
false
13
def _wait_until_running(instance):
    with start_action(action_type=u'flocker:provision:aws:wait_until_running', instance_id=instance.id) as context:
        _poll_while((lambda : _node_is_booting(instance)), repeat(1, INSTANCE_TIMEOUT))
        context.add_success_fields(instance_state=instance.state)
        context.add_success_fields(instance_state_reason=instance.state_reason)
    if (instance.state != u'running'):
        raise FailedToRun(instance.state_reason)
[ "def", "_wait_until_running", "(", "instance", ")", ":", "with", "start_action", "(", "action_type", "=", "u'flocker:provision:aws:wait_until_running'", ",", "instance_id", "=", "instance", ".", "id", ")", "as", "context", ":", "_poll_while", "(", "(", "lambda", ":", "_node_is_booting", "(", "instance", ")", ")", ",", "repeat", "(", "1", ",", "INSTANCE_TIMEOUT", ")", ")", "context", ".", "add_success_fields", "(", "instance_state", "=", "instance", ".", "state", ")", "context", ".", "add_success_fields", "(", "instance_state_reason", "=", "instance", ".", "state_reason", ")", "if", "(", "instance", ".", "state", "!=", "u'running'", ")", ":", "raise", "FailedToRun", "(", "instance", ".", "state_reason", ")" ]
wait until a instance is running .
train
false
14
def dict_to_numpy_array(d, mapping=None):
    try:
        return dict_to_numpy_array2(d, mapping)
    except (AttributeError, TypeError):
        return dict_to_numpy_array1(d, mapping)
[ "def", "dict_to_numpy_array", "(", "d", ",", "mapping", "=", "None", ")", ":", "try", ":", "return", "dict_to_numpy_array2", "(", "d", ",", "mapping", ")", "except", "(", "AttributeError", ",", "TypeError", ")", ":", "return", "dict_to_numpy_array1", "(", "d", ",", "mapping", ")" ]
convert a dictionary of dictionaries to a numpy array with optional mapping .
train
false
15
def _is_suggestion_handled(thread_id, exploration_id):
    thread = feedback_models.FeedbackThreadModel.get_by_exp_and_thread_id(exploration_id, thread_id)
    return (thread.status in [feedback_models.STATUS_CHOICES_FIXED, feedback_models.STATUS_CHOICES_IGNORED])
[ "def", "_is_suggestion_handled", "(", "thread_id", ",", "exploration_id", ")", ":", "thread", "=", "feedback_models", ".", "FeedbackThreadModel", ".", "get_by_exp_and_thread_id", "(", "exploration_id", ",", "thread_id", ")", "return", "(", "thread", ".", "status", "in", "[", "feedback_models", ".", "STATUS_CHOICES_FIXED", ",", "feedback_models", ".", "STATUS_CHOICES_IGNORED", "]", ")" ]
checks if the current suggestion has already been accepted/rejected .
train
false
17
def get_default_site(app_name='filebrowser'):
    resolver = get_resolver(get_urlconf())
    name = 'filebrowser'
    app_list = resolver.app_dict[app_name]
    if (name not in app_list):
        name = app_list[0]
    return get_site_dict()[name]
[ "def", "get_default_site", "(", "app_name", "=", "'filebrowser'", ")", ":", "resolver", "=", "get_resolver", "(", "get_urlconf", "(", ")", ")", "name", "=", "'filebrowser'", "app_list", "=", "resolver", ".", "app_dict", "[", "app_name", "]", "if", "(", "name", "not", "in", "app_list", ")", ":", "name", "=", "app_list", "[", "0", "]", "return", "get_site_dict", "(", ")", "[", "name", "]" ]
returns the default site .
train
false
18
def ccovf(x, y, unbiased=True, demean=True):
    n = len(x)
    if demean:
        xo = (x - x.mean())
        yo = (y - y.mean())
    else:
        xo = x
        yo = y
    if unbiased:
        xi = np.ones(n)
        d = np.correlate(xi, xi, 'full')
    else:
        d = n
    return (np.correlate(xo, yo, 'full') / d)[(n - 1):]
[ "def", "ccovf", "(", "x", ",", "y", ",", "unbiased", "=", "True", ",", "demean", "=", "True", ")", ":", "n", "=", "len", "(", "x", ")", "if", "demean", ":", "xo", "=", "(", "x", "-", "x", ".", "mean", "(", ")", ")", "yo", "=", "(", "y", "-", "y", ".", "mean", "(", ")", ")", "else", ":", "xo", "=", "x", "yo", "=", "y", "if", "unbiased", ":", "xi", "=", "np", ".", "ones", "(", "n", ")", "d", "=", "np", ".", "correlate", "(", "xi", ",", "xi", ",", "'full'", ")", "else", ":", "d", "=", "n", "return", "(", "np", ".", "correlate", "(", "xo", ",", "yo", ",", "'full'", ")", "/", "d", ")", "[", "(", "n", "-", "1", ")", ":", "]" ]
crosscovariance for 1d parameters x .
train
false
20
def serializers(**serializers):
    def decorator(func):
        if (not hasattr(func, 'wsgi_serializers')):
            func.wsgi_serializers = {}
        func.wsgi_serializers.update(serializers)
        return func
    return decorator
[ "def", "serializers", "(", "**", "serializers", ")", ":", "def", "decorator", "(", "func", ")", ":", "if", "(", "not", "hasattr", "(", "func", ",", "'wsgi_serializers'", ")", ")", ":", "func", ".", "wsgi_serializers", "=", "{", "}", "func", ".", "wsgi_serializers", ".", "update", "(", "serializers", ")", "return", "func", "return", "decorator" ]
returns the serializers modules .
train
false
21
def rgb2short(r, g, b):
    incs = (0, 95, 135, 175, 215, 255)
    parts = [r, g, b]
    res = []
    for part in parts:
        i = 0
        while (i < (len(incs) - 1)):
            (s, b) = (incs[i], incs[(i + 1)])
            if (s <= part <= b):
                s1 = abs((s - part))
                b1 = abs((b - part))
                if (s1 < b1):
                    closest = s
                else:
                    closest = b
                res.append(closest)
                break
            i += 1
    return RGB2SHORT_DICT[tuple(res)]
[ "def", "rgb2short", "(", "r", ",", "g", ",", "b", ")", ":", "incs", "=", "(", "0", ",", "95", ",", "135", ",", "175", ",", "215", ",", "255", ")", "parts", "=", "[", "r", ",", "g", ",", "b", "]", "res", "=", "[", "]", "for", "part", "in", "parts", ":", "i", "=", "0", "while", "(", "i", "<", "(", "len", "(", "incs", ")", "-", "1", ")", ")", ":", "(", "s", ",", "b", ")", "=", "(", "incs", "[", "i", "]", ",", "incs", "[", "(", "i", "+", "1", ")", "]", ")", "if", "(", "s", "<=", "part", "<=", "b", ")", ":", "s1", "=", "abs", "(", "(", "s", "-", "part", ")", ")", "b1", "=", "abs", "(", "(", "b", "-", "part", ")", ")", "if", "(", "s1", "<", "b1", ")", ":", "closest", "=", "s", "else", ":", "closest", "=", "b", "res", ".", "append", "(", "closest", ")", "break", "i", "+=", "1", "return", "RGB2SHORT_DICT", "[", "tuple", "(", "res", ")", "]" ]
rgb to short .
train
true
22
def output():
    return s3_rest_controller()
[ "def", "output", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
output -> html string either return the result of a function or a sparse htmlized error message and a message in the server log .
train
false
24
def gf_factor_sqf(f, p, K, method=None):
    (lc, f) = gf_monic(f, p, K)
    if (gf_degree(f) < 1):
        return (lc, [])
    method = (method or query('GF_FACTOR_METHOD'))
    if (method is not None):
        factors = _factor_methods[method](f, p, K)
    else:
        factors = gf_zassenhaus(f, p, K)
    return (lc, factors)
[ "def", "gf_factor_sqf", "(", "f", ",", "p", ",", "K", ",", "method", "=", "None", ")", ":", "(", "lc", ",", "f", ")", "=", "gf_monic", "(", "f", ",", "p", ",", "K", ")", "if", "(", "gf_degree", "(", "f", ")", "<", "1", ")", ":", "return", "(", "lc", ",", "[", "]", ")", "method", "=", "(", "method", "or", "query", "(", "'GF_FACTOR_METHOD'", ")", ")", "if", "(", "method", "is", "not", "None", ")", ":", "factors", "=", "_factor_methods", "[", "method", "]", "(", "f", ",", "p", ",", "K", ")", "else", ":", "factors", "=", "gf_zassenhaus", "(", "f", ",", "p", ",", "K", ")", "return", "(", "lc", ",", "factors", ")" ]
factor a square-free polynomial f in gf(p)[x] .
train
false
26
def draw_nx(G, pos, **kwds):
    draw(G, pos, **kwds)
[ "def", "draw_nx", "(", "G", ",", "pos", ",", "**", "kwds", ")", ":", "draw", "(", "G", ",", "pos", ",", "**", "kwds", ")" ]
for backward compatibility; use draw or draw_networkx .
train
false
27
def start_clientbrowser(config, args):
    logger.info('Start client mode (browser)')
    global client
    from glances.client_browser import GlancesClientBrowser
    client = GlancesClientBrowser(config=config, args=args)
    client.serve_forever()
    client.end()
[ "def", "start_clientbrowser", "(", "config", ",", "args", ")", ":", "logger", ".", "info", "(", "'Start client mode (browser)'", ")", "global", "client", "from", "glances", ".", "client_browser", "import", "GlancesClientBrowser", "client", "=", "GlancesClientBrowser", "(", "config", "=", "config", ",", "args", "=", "args", ")", "client", ".", "serve_forever", "(", ")", "client", ".", "end", "(", ")" ]
start the browser client mode .
train
false
28
@raises(ValueError)
def test_bootstrap_arglength():
    algo.bootstrap(np.arange(5), np.arange(10))
[ "@", "raises", "(", "ValueError", ")", "def", "test_bootstrap_arglength", "(", ")", ":", "algo", ".", "bootstrap", "(", "np", ".", "arange", "(", "5", ")", ",", "np", ".", "arange", "(", "10", ")", ")" ]
test that different length args raise valueerror .
train
false
29
def runwsgi(func):
    if os.environ.has_key('SERVER_SOFTWARE'):
        os.environ['FCGI_FORCE_CGI'] = 'Y'
    if (os.environ.has_key('PHP_FCGI_CHILDREN') or os.environ.has_key('SERVER_SOFTWARE')):
        import flup.server.fcgi
        return runfcgi(func)
    if ('scgi' in sys.argv):
        import flup.server.scgi
        return runscgi(func)
    return runsimple(func, listget(sys.argv, 1, 8080))
[ "def", "runwsgi", "(", "func", ")", ":", "if", "os", ".", "environ", ".", "has_key", "(", "'SERVER_SOFTWARE'", ")", ":", "os", ".", "environ", "[", "'FCGI_FORCE_CGI'", "]", "=", "'Y'", "if", "(", "os", ".", "environ", ".", "has_key", "(", "'PHP_FCGI_CHILDREN'", ")", "or", "os", ".", "environ", ".", "has_key", "(", "'SERVER_SOFTWARE'", ")", ")", ":", "import", "flup", ".", "server", ".", "fcgi", "return", "runfcgi", "(", "func", ")", "if", "(", "'scgi'", "in", "sys", ".", "argv", ")", ":", "import", "flup", ".", "server", ".", "scgi", "return", "runscgi", "(", "func", ")", "return", "runsimple", "(", "func", ",", "listget", "(", "sys", ".", "argv", ",", "1", ",", "8080", ")", ")" ]
runs a wsgi-compatible function using fcgi .
train
false
30
def random_bucket_name(prefix='awscli-s3integ-', num_random=10):
    return (prefix + random_chars(num_random))
[ "def", "random_bucket_name", "(", "prefix", "=", "'awscli-s3integ-'", ",", "num_random", "=", "10", ")", ":", "return", "(", "prefix", "+", "random_chars", "(", "num_random", ")", ")" ]
generate a random s3 bucket name .
train
false
31
def _proxy_process(proxyname, test):
    changes_old = []
    changes_new = []
    if (not _is_proxy_running(proxyname)):
        if (not test):
            __salt__['cmd.run_all']('salt-proxy --proxyid={0} -l info -d'.format(salt.ext.six.moves.shlex_quote(proxyname)), timeout=5)
            changes_new.append('Salt Proxy: Started proxy process for {0}'.format(proxyname))
        else:
            changes_new.append('Salt Proxy: process {0} will be started'.format(proxyname))
    else:
        changes_old.append('Salt Proxy: already running for {0}'.format(proxyname))
    return (True, changes_new, changes_old)
[ "def", "_proxy_process", "(", "proxyname", ",", "test", ")", ":", "changes_old", "=", "[", "]", "changes_new", "=", "[", "]", "if", "(", "not", "_is_proxy_running", "(", "proxyname", ")", ")", ":", "if", "(", "not", "test", ")", ":", "__salt__", "[", "'cmd.run_all'", "]", "(", "'salt-proxy --proxyid={0} -l info -d'", ".", "format", "(", "salt", ".", "ext", ".", "six", ".", "moves", ".", "shlex_quote", "(", "proxyname", ")", ")", ",", "timeout", "=", "5", ")", "changes_new", ".", "append", "(", "'Salt Proxy: Started proxy process for {0}'", ".", "format", "(", "proxyname", ")", ")", "else", ":", "changes_new", ".", "append", "(", "'Salt Proxy: process {0} will be started'", ".", "format", "(", "proxyname", ")", ")", "else", ":", "changes_old", ".", "append", "(", "'Salt Proxy: already running for {0}'", ".", "format", "(", "proxyname", ")", ")", "return", "(", "True", ",", "changes_new", ",", "changes_old", ")" ]
check and execute proxy process .
train
true
32
def _offset_or_limit_clause(element, name=None, type_=None):
    if (element is None):
        return None
    elif hasattr(element, '__clause_element__'):
        return element.__clause_element__()
    elif isinstance(element, Visitable):
        return element
    else:
        value = util.asint(element)
        return _OffsetLimitParam(name, value, type_=type_, unique=True)
[ "def", "_offset_or_limit_clause", "(", "element", ",", "name", "=", "None", ",", "type_", "=", "None", ")", ":", "if", "(", "element", "is", "None", ")", ":", "return", "None", "elif", "hasattr", "(", "element", ",", "'__clause_element__'", ")", ":", "return", "element", ".", "__clause_element__", "(", ")", "elif", "isinstance", "(", "element", ",", "Visitable", ")", ":", "return", "element", "else", ":", "value", "=", "util", ".", "asint", "(", "element", ")", "return", "_OffsetLimitParam", "(", "name", ",", "value", ",", "type_", "=", "type_", ",", "unique", "=", "True", ")" ]
convert the given value to an "offset or limit" clause .
train
false
34
def _retrieve_device_config():
    return __salt__['snmp.config']()
[ "def", "_retrieve_device_config", "(", ")", ":", "return", "__salt__", "[", "'snmp.config'", "]", "(", ")" ]
retrieves the snmp config from the device .
train
false
35
def normalize_formset_dict(formset, attr_list):
    assert isinstance(formset, BaseSimpleFormSet)
    res = []
    for form in formset.forms:
        res.append(normalize_form_dict(form, attr_list))
    return res
[ "def", "normalize_formset_dict", "(", "formset", ",", "attr_list", ")", ":", "assert", "isinstance", "(", "formset", ",", "BaseSimpleFormSet", ")", "res", "=", "[", "]", "for", "form", "in", "formset", ".", "forms", ":", "res", ".", "append", "(", "normalize_form_dict", "(", "form", ",", "attr_list", ")", ")", "return", "res" ]
normalize_formset_dict -> a list of dictionary of .
train
false
36
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    return datetime.datetime.strptime(timestr, fmt)
[ "def", "parse_strtime", "(", "timestr", ",", "fmt", "=", "PERFECT_TIME_FORMAT", ")", ":", "return", "datetime", ".", "datetime", ".", "strptime", "(", "timestr", ",", "fmt", ")" ]
turn a formatted time back into a datetime .
train
false
37
def hash_filehash(filename):
    md4 = hashlib.new(u'md4').copy
    def gen(f):
        while True:
            x = f.read(9728000)
            if x:
                (yield x)
            else:
                return
    def md4_hash(data):
        m = md4()
        m.update(data)
        return m
    with open(filename, u'rb') as f:
        a = gen(f)
        hashes = [md4_hash(data).digest() for data in a]
        if (len(hashes) == 1):
            return to_hex(hashes[0])
        else:
            return md4_hash(reduce((lambda a, d: (a + d)), hashes, u'')).hexdigest()
[ "def", "hash_filehash", "(", "filename", ")", ":", "md4", "=", "hashlib", ".", "new", "(", "u'md4'", ")", ".", "copy", "def", "gen", "(", "f", ")", ":", "while", "True", ":", "x", "=", "f", ".", "read", "(", "9728000", ")", "if", "x", ":", "(", "yield", "x", ")", "else", ":", "return", "def", "md4_hash", "(", "data", ")", ":", "m", "=", "md4", "(", ")", "m", ".", "update", "(", "data", ")", "return", "m", "with", "open", "(", "filename", ",", "u'rb'", ")", "as", "f", ":", "a", "=", "gen", "(", "f", ")", "hashes", "=", "[", "md4_hash", "(", "data", ")", ".", "digest", "(", ")", "for", "data", "in", "a", "]", "if", "(", "len", "(", "hashes", ")", "==", "1", ")", ":", "return", "to_hex", "(", "hashes", "[", "0", "]", ")", "else", ":", "return", "md4_hash", "(", "reduce", "(", "(", "lambda", "a", ",", "d", ":", "(", "a", "+", "d", ")", ")", ",", "hashes", ",", "u''", ")", ")", ".", "hexd" ]
returns the ed2k hash of a given file .
train
false
39
@receiver(user_logged_in)
def log_successful_login(sender, request, user, **kwargs):
    if settings.FEATURES['SQUELCH_PII_IN_LOGS']:
        AUDIT_LOG.info(u'Login success - user.id: {0}'.format(user.id))
    else:
        AUDIT_LOG.info(u'Login success - {0} ({1})'.format(user.username, user.email))
[ "@", "receiver", "(", "user_logged_in", ")", "def", "log_successful_login", "(", "sender", ",", "request", ",", "user", ",", "**", "kwargs", ")", ":", "if", "settings", ".", "FEATURES", "[", "'SQUELCH_PII_IN_LOGS'", "]", ":", "AUDIT_LOG", ".", "info", "(", "u'Login success - user.id: {0}'", ".", "format", "(", "user", ".", "id", ")", ")", "else", ":", "AUDIT_LOG", ".", "info", "(", "u'Login success - {0} ({1})'", ".", "format", "(", "user", ".", "username", ",", "user", ".", "email", ")", ")" ]
handler to log when logins have occurred successfully .
train
false
43
def zoom_effect02(ax1, ax2, **kwargs):
    tt = (ax1.transScale + (ax1.transLimits + ax2.transAxes))
    trans = blended_transform_factory(ax2.transData, tt)
    mybbox1 = ax1.bbox
    mybbox2 = TransformedBbox(ax1.viewLim, trans)
    prop_patches = kwargs.copy()
    prop_patches['ec'] = 'none'
    prop_patches['alpha'] = 0.2
    (c1, c2, bbox_patch1, bbox_patch2, p) = connect_bbox(mybbox1, mybbox2, loc1a=3, loc2a=2, loc1b=4, loc2b=1, prop_lines=kwargs, prop_patches=prop_patches)
    ax1.add_patch(bbox_patch1)
    ax2.add_patch(bbox_patch2)
    ax2.add_patch(c1)
    ax2.add_patch(c2)
    ax2.add_patch(p)
    return (c1, c2, bbox_patch1, bbox_patch2, p)
[ "def", "zoom_effect02", "(", "ax1", ",", "ax2", ",", "**", "kwargs", ")", ":", "tt", "=", "(", "ax1", ".", "transScale", "+", "(", "ax1", ".", "transLimits", "+", "ax2", ".", "transAxes", ")", ")", "trans", "=", "blended_transform_factory", "(", "ax2", ".", "transData", ",", "tt", ")", "mybbox1", "=", "ax1", ".", "bbox", "mybbox2", "=", "TransformedBbox", "(", "ax1", ".", "viewLim", ",", "trans", ")", "prop_patches", "=", "kwargs", ".", "copy", "(", ")", "prop_patches", "[", "'ec'", "]", "=", "'none'", "prop_patches", "[", "'alpha'", "]", "=", "0.2", "(", "c1", ",", "c2", ",", "bbox_patch1", ",", "bbox_patch2", ",", "p", ")", "=", "connect_bbox", "(", "mybbox1", ",", "mybbox2", ",", "loc1a", "=", "3", ",", "loc2a", "=", "2", ",", "loc1b", "=", "4", ",", "loc2b", "=", "1", ",", "prop_lines", "=", "kwargs", ",", "prop_patches", "=", "prop_patches", ")", "ax1", ".", "add_patch", "(", "bbox_patch1", ")", "ax2", ".", "add_patch", "(", "bbox_patch2", ")", "ax2", ".", "add_patch", "(", "c1", ")", "ax2", ".", "add_patch", "(", "c2", ")", "ax2", ".", "add_patch", "(", "p", ")", "return", "(", "c1", ",", "c2", ",", "bbox_patch1", ",", "bbox_patch2", ",", "p", ")" ]
ax1 : the main axes ax1 : the zoomed axes similar to zoom_effect01 .
train
false
44
def _expand_table(table):
    return np.repeat([[1, 1], [1, 0], [0, 1], [0, 0]], table.ravel(), axis=0)
[ "def", "_expand_table", "(", "table", ")", ":", "return", "np", ".", "repeat", "(", "[", "[", "1", ",", "1", "]", ",", "[", "1", ",", "0", "]", ",", "[", "0", ",", "1", "]", ",", "[", "0", ",", "0", "]", "]", ",", "table", ".", "ravel", "(", ")", ",", "axis", "=", "0", ")" ]
expand a 2 by 2 contingency table to observations .
train
false
45
def load_extra_data(backend, details, response, uid, user, social_user=None, *args, **kwargs):
    social_user = (social_user or UserSocialAuth.get_social_auth(backend.name, uid, user))
    if social_user:
        extra_data = backend.extra_data(user, uid, response, details)
        if (kwargs.get('original_email') and ('email' not in extra_data)):
            extra_data['email'] = kwargs.get('original_email')
        if (extra_data and (social_user.extra_data != extra_data)):
            if social_user.extra_data:
                social_user.extra_data.update(extra_data)
            else:
                social_user.extra_data = extra_data
            social_user.save()
        return {'social_user': social_user}
[ "def", "load_extra_data", "(", "backend", ",", "details", ",", "response", ",", "uid", ",", "user", ",", "social_user", "=", "None", ",", "*", "args", ",", "**", "kwargs", ")", ":", "social_user", "=", "(", "social_user", "or", "UserSocialAuth", ".", "get_social_auth", "(", "backend", ".", "name", ",", "uid", ",", "user", ")", ")", "if", "social_user", ":", "extra_data", "=", "backend", ".", "extra_data", "(", "user", ",", "uid", ",", "response", ",", "details", ")", "if", "(", "kwargs", ".", "get", "(", "'original_email'", ")", "and", "(", "'email'", "not", "in", "extra_data", ")", ")", ":", "extra_data", "[", "'email'", "]", "=", "kwargs", ".", "get", "(", "'original_email'", ")", "if", "(", "extra_data", "and", "(", "social_user", ".", "extra_data", "!=", "extra_data", ")", ")", ":", "if", "social_user", ".", "extra_data", ":", "social_user", ".", "extra_data", ".", "update", "(", "extra_data", ")", "else", ":", "social_user", ".", "extra_data", "=", "extra_data", "social_user", ".", "save", "(", ")", "return", "{", "'social_user'", ":", "social_user", "}" ]
load extra data from provider and store it on current usersocialauth extra_data field .
train
false
46
def arbitrary(module_name, func_name, args, kwargs={}):
    if module_name.startswith('calibre_plugins'):
        from calibre.customize.ui import find_plugin
        find_plugin
    module = importlib.import_module(module_name)
    func = getattr(module, func_name)
    return func(*args, **kwargs)
[ "def", "arbitrary", "(", "module_name", ",", "func_name", ",", "args", ",", "kwargs", "=", "{", "}", ")", ":", "if", "module_name", ".", "startswith", "(", "'calibre_plugins'", ")", ":", "from", "calibre", ".", "customize", ".", "ui", "import", "find_plugin", "find_plugin", "module", "=", "importlib", ".", "import_module", "(", "module_name", ")", "func", "=", "getattr", "(", "module", ",", "func_name", ")", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")" ]
an entry point that allows arbitrary functions to be run in a parallel process .
train
false
47
def claModelControlDisableTPLearningCb(claModel):
    assert isinstance(claModel, CLAModel)
    claModel._getTPRegion().setParameter('learningMode', False)
    return
[ "def", "claModelControlDisableTPLearningCb", "(", "claModel", ")", ":", "assert", "isinstance", "(", "claModel", ",", "CLAModel", ")", "claModel", ".", "_getTPRegion", "(", ")", ".", "setParameter", "(", "'learningMode'", ",", "False", ")", "return" ]
disables learning in the cla models temporal pooler .
train
false
48
def create_api_deployment(restApiId, stageName, stageDescription='', description='', cacheClusterEnabled=False, cacheClusterSize='0.5', variables=None, region=None, key=None, keyid=None, profile=None):
    try:
        variables = (dict() if (variables is None) else variables)
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        deployment = conn.create_deployment(restApiId=restApiId, stageName=stageName, stageDescription=stageDescription, description=description, cacheClusterEnabled=cacheClusterEnabled, cacheClusterSize=cacheClusterSize, variables=variables)
        return {'created': True, 'deployment': _convert_datetime_str(deployment)}
    except ClientError as e:
        return {'created': False, 'error': salt.utils.boto3.get_error(e)}
[ "def", "create_api_deployment", "(", "restApiId", ",", "stageName", ",", "stageDescription", "=", "''", ",", "description", "=", "''", ",", "cacheClusterEnabled", "=", "False", ",", "cacheClusterSize", "=", "'0.5'", ",", "variables", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "variables", "=", "(", "dict", "(", ")", "if", "(", "variables", "is", "None", ")", "else", "variables", ")", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "deployment", "=", "conn", ".", "create_deployment", "(", "restApiId", "=", "restApiId", ",", "stageName", "=", "stageName", ",", "stageDescription", "=", "stageDescription", ",", "description", "=", "description", ",", "cacheClusterEnabled", "=", "cacheClusterEnabled", ",", "cacheClusterSize", "=", "cacheClusterSize", ",", "variables", "=", "variables", ")", "return", "{", "'created'", ":", "True", ",", "'deployment'", ":", "_convert_datetime_str", "(", "deployment", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'created'", ":", "False", ",", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
creates a new api deployment .
train
true
49
def _point_along_a_line(x0, y0, x1, y1, d):
    (dx, dy) = ((x0 - x1), (y0 - y1))
    ff = (d / (((dx * dx) + (dy * dy)) ** 0.5))
    (x2, y2) = ((x0 - (ff * dx)), (y0 - (ff * dy)))
    return (x2, y2)
[ "def", "_point_along_a_line", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ",", "d", ")", ":", "(", "dx", ",", "dy", ")", "=", "(", "(", "x0", "-", "x1", ")", ",", "(", "y0", "-", "y1", ")", ")", "ff", "=", "(", "d", "/", "(", "(", "(", "dx", "*", "dx", ")", "+", "(", "dy", "*", "dy", ")", ")", "**", "0.5", ")", ")", "(", "x2", ",", "y2", ")", "=", "(", "(", "x0", "-", "(", "ff", "*", "dx", ")", ")", ",", "(", "y0", "-", "(", "ff", "*", "dy", ")", ")", ")", "return", "(", "x2", ",", "y2", ")" ]
find a point along a line connecting -- whose distance from is d .
train
false
50
def s3_roles_permitted(name='roles_permitted', **attr):
    T = current.T
    represent = S3Represent(lookup='auth_group', fields=['role'])
    if ('label' not in attr):
        attr['label'] = T('Roles Permitted')
    if ('sortby' not in attr):
        attr['sortby'] = 'role'
    if ('represent' not in attr):
        attr['represent'] = represent
    if ('requires' not in attr):
        attr['requires'] = IS_EMPTY_OR(IS_ONE_OF(current.db, 'auth_group.id', represent, multiple=True))
    if ('comment' not in attr):
        attr['comment'] = DIV(_class='tooltip', _title=('%s|%s' % (T('Roles Permitted'), T('If this record should be restricted then select which role(s) are permitted to access the record here.'))))
    if ('ondelete' not in attr):
        attr['ondelete'] = 'RESTRICT'
    f = S3ReusableField(name, 'list:reference auth_group', **attr)
    return f()
[ "def", "s3_roles_permitted", "(", "name", "=", "'roles_permitted'", ",", "**", "attr", ")", ":", "T", "=", "current", ".", "T", "represent", "=", "S3Represent", "(", "lookup", "=", "'auth_group'", ",", "fields", "=", "[", "'role'", "]", ")", "if", "(", "'label'", "not", "in", "attr", ")", ":", "attr", "[", "'label'", "]", "=", "T", "(", "'Roles Permitted'", ")", "if", "(", "'sortby'", "not", "in", "attr", ")", ":", "attr", "[", "'sortby'", "]", "=", "'role'", "if", "(", "'represent'", "not", "in", "attr", ")", ":", "attr", "[", "'represent'", "]", "=", "represent", "if", "(", "'requires'", "not", "in", "attr", ")", ":", "attr", "[", "'requires'", "]", "=", "IS_EMPTY_OR", "(", "IS_ONE_OF", "(", "current", ".", "db", ",", "'auth_group.id'", ",", "represent", ",", "multiple", "=", "True", ")", ")", "if", "(", "'comment'", "not", "in", "attr", ")", ":", "attr", "[", "'comment'", "]", "=", "DIV", "(", "_class", "=", "'tooltip'", ",", "_title", "=", "(", "'%s|%s'", "%", "(", "T", "(", "'Roles Permitted'", ")", ",", "T", "(", "'If this record should be restricted then select which role(s) are permitted to access the record here.'", ")", ")", ")", ")", "if", "(", "'ondelete'", "not", "in", "attr", ")", ":", "attr", "[", "'ondelete'", "]", "=", "'RESTRICT'", "f", "=", "S3ReusableField", "(", "name", ",", "'list:reference auth_group'", ",", "**", "attr", ")", "return", "f", "(", ")" ]
list of roles permitted to access a resource - used by cms .
train
false
52
@pick_context_manager_writer
def instance_group_update(context, group_uuid, values):
    group = model_query(context, models.InstanceGroup).filter_by(uuid=group_uuid).first()
    if (not group):
        raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
    policies = values.get('policies')
    if (policies is not None):
        _instance_group_policies_add(context, group.id, values.pop('policies'), set_delete=True)
    members = values.get('members')
    if (members is not None):
        _instance_group_members_add(context, group.id, values.pop('members'), set_delete=True)
    group.update(values)
    if policies:
        values['policies'] = policies
    if members:
        values['members'] = members
[ "@", "pick_context_manager_writer", "def", "instance_group_update", "(", "context", ",", "group_uuid", ",", "values", ")", ":", "group", "=", "model_query", "(", "context", ",", "models", ".", "InstanceGroup", ")", ".", "filter_by", "(", "uuid", "=", "group_uuid", ")", ".", "first", "(", ")", "if", "(", "not", "group", ")", ":", "raise", "exception", ".", "InstanceGroupNotFound", "(", "group_uuid", "=", "group_uuid", ")", "policies", "=", "values", ".", "get", "(", "'policies'", ")", "if", "(", "policies", "is", "not", "None", ")", ":", "_instance_group_policies_add", "(", "context", ",", "group", ".", "id", ",", "values", ".", "pop", "(", "'policies'", ")", ",", "set_delete", "=", "True", ")", "members", "=", "values", ".", "get", "(", "'members'", ")", "if", "(", "members", "is", "not", "None", ")", ":", "_instance_group_members_add", "(", "context", ",", "group", ".", "id", ",", "values", ".", "pop", "(", "'members'", ")", ",", "set_delete", "=", "True", ")", "group", ".", "update", "(", "values", ")", "if", "policies", ":", "values", "[", "'policies'", "]", "=", "policies", "if", "members", ":", "values", "[", "'members'", "]", "=", "members" ]
update the attributes of an group .
train
false
53
def _tree_to_bitstrs(tree):
    clades_bitstrs = {}
    term_names = [term.name for term in tree.find_clades(terminal=True)]
    for clade in tree.find_clades(terminal=False):
        bitstr = _clade_to_bitstr(clade, term_names)
        clades_bitstrs[clade] = bitstr
    return clades_bitstrs
[ "def", "_tree_to_bitstrs", "(", "tree", ")", ":", "clades_bitstrs", "=", "{", "}", "term_names", "=", "[", "term", ".", "name", "for", "term", "in", "tree", ".", "find_clades", "(", "terminal", "=", "True", ")", "]", "for", "clade", "in", "tree", ".", "find_clades", "(", "terminal", "=", "False", ")", ":", "bitstr", "=", "_clade_to_bitstr", "(", "clade", ",", "term_names", ")", "clades_bitstrs", "[", "clade", "]", "=", "bitstr", "return", "clades_bitstrs" ]
create a dict of a trees clades to corresponding bitstrings .
train
false
54
def timeuntil(d, now=None):
    return timesince(d, now, reversed=True)
[ "def", "timeuntil", "(", "d", ",", "now", "=", "None", ")", ":", "return", "timesince", "(", "d", ",", "now", ",", "reversed", "=", "True", ")" ]
formats a date as the time until that date .
train
false
56
def test_hashbang():
    entry = tokenize('#!this is a comment\n')
    assert (entry == [])
[ "def", "test_hashbang", "(", ")", ":", "entry", "=", "tokenize", "(", "'#!this is a comment\\n'", ")", "assert", "(", "entry", "==", "[", "]", ")" ]
ensure we can escape things .
train
false
59
@context.quietfunc
@with_device
def exists(path):
    with AdbClient() as c:
        return bool(c.stat(path))
[ "@", "context", ".", "quietfunc", "@", "with_device", "def", "exists", "(", "path", ")", ":", "with", "AdbClient", "(", ")", "as", "c", ":", "return", "bool", "(", "c", ".", "stat", "(", "path", ")", ")" ]
check if a user exists .
train
false
60
def disassociate_api_key_stagekeys(apiKey, stagekeyslist, region=None, key=None, keyid=None, profile=None):
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        pvlist = [('/stages', stagekey) for stagekey in stagekeyslist]
        response = _api_key_patch_remove(conn, apiKey, pvlist)
        return {'disassociated': True}
    except ClientError as e:
        return {'disassociated': False, 'error': salt.utils.boto3.get_error(e)}
[ "def", "disassociate_api_key_stagekeys", "(", "apiKey", ",", "stagekeyslist", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "pvlist", "=", "[", "(", "'/stages'", ",", "stagekey", ")", "for", "stagekey", "in", "stagekeyslist", "]", "response", "=", "_api_key_patch_remove", "(", "conn", ",", "apiKey", ",", "pvlist", ")", "return", "{", "'disassociated'", ":", "True", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'disassociated'", ":", "False", ",", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
disassociate the given stagekeyslist to the given apikey .
train
false
61
@then(u'we see database dropped')
def step_see_db_dropped(context):
    _expect_exact(context, u'DROP DATABASE', timeout=2)
[ "@", "then", "(", "u'we see database dropped'", ")", "def", "step_see_db_dropped", "(", "context", ")", ":", "_expect_exact", "(", "context", ",", "u'DROP DATABASE'", ",", "timeout", "=", "2", ")" ]
wait to see drop database output .
train
false
62
@bdd.when(bdd.parsers.parse('I wait for the javascript message "{message}"'))
def javascript_message_when(quteproc, message):
    quteproc.wait_for_js(message)
[ "@", "bdd", ".", "when", "(", "bdd", ".", "parsers", ".", "parse", "(", "'I wait for the javascript message \"{message}\"'", ")", ")", "def", "javascript_message_when", "(", "quteproc", ",", "message", ")", ":", "quteproc", ".", "wait_for_js", "(", "message", ")" ]
make sure the given message was logged via javascript .
train
false
63
def _gitPresent():
    try:
        gitvers = subprocess.check_output('git --version'.split(), stderr=subprocess.PIPE)
    except (CalledProcessError, OSError):
        gitvers = ''
    return bool(gitvers.startswith('git version'))
[ "def", "_gitPresent", "(", ")", ":", "try", ":", "gitvers", "=", "subprocess", ".", "check_output", "(", "'git --version'", ".", "split", "(", ")", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "except", "(", "CalledProcessError", ",", "OSError", ")", ":", "gitvers", "=", "''", "return", "bool", "(", "gitvers", ".", "startswith", "(", "'git version'", ")", ")" ]
check for git on command-line .
train
false
64
def create_mount_target(filesystemid, subnetid, ipaddress=None, securitygroups=None, keyid=None, key=None, profile=None, region=None, **kwargs):
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
    return client.create_mount_point(FileSystemId=filesystemid, SubnetId=subnetid, IpAddress=ipaddress, SecurityGroups=securitygroups)
[ "def", "create_mount_target", "(", "filesystemid", ",", "subnetid", ",", "ipaddress", "=", "None", ",", "securitygroups", "=", "None", ",", "keyid", "=", "None", ",", "key", "=", "None", ",", "profile", "=", "None", ",", "region", "=", "None", ",", "**", "kwargs", ")", ":", "client", "=", "_get_conn", "(", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ",", "region", "=", "region", ")", "return", "client", ".", "create_mount_point", "(", "FileSystemId", "=", "filesystemid", ",", "SubnetId", "=", "subnetid", ",", "IpAddress", "=", "ipaddress", ",", "SecurityGroups", "=", "securitygroups", ")" ]
creates a mount target for a file system .
train
false
65
def IsAutoGenerated(xml_str):
    try:
        xml_root = ElementTree.fromstring(xml_str)
        return ((xml_root.tag == 'datastore-indexes') and _BooleanAttribute(xml_root.attrib.get('autoGenerate', 'false')))
    except ElementTree.ParseError:
        return False
[ "def", "IsAutoGenerated", "(", "xml_str", ")", ":", "try", ":", "xml_root", "=", "ElementTree", ".", "fromstring", "(", "xml_str", ")", "return", "(", "(", "xml_root", ".", "tag", "==", "'datastore-indexes'", ")", "and", "_BooleanAttribute", "(", "xml_root", ".", "attrib", ".", "get", "(", "'autoGenerate'", ",", "'false'", ")", ")", ")", "except", "ElementTree", ".", "ParseError", ":", "return", "False" ]
test if the given datastore-indexes .
train
false
66
def jnp_zeros(n, nt):
    return jnyn_zeros(n, nt)[1]
[ "def", "jnp_zeros", "(", "n", ",", "nt", ")", ":", "return", "jnyn_zeros", "(", "n", ",", "nt", ")", "[", "1", "]" ]
compute zeros of integer-order bessel function derivative jn(x) .
train
false
68
def set_time(time):
    time_format = _get_date_time_format(time)
    dt_obj = datetime.strptime(time, time_format)
    cmd = 'systemsetup -settime {0}'.format(dt_obj.strftime('%H:%M:%S'))
    return salt.utils.mac_utils.execute_return_success(cmd)
[ "def", "set_time", "(", "time", ")", ":", "time_format", "=", "_get_date_time_format", "(", "time", ")", "dt_obj", "=", "datetime", ".", "strptime", "(", "time", ",", "time_format", ")", "cmd", "=", "'systemsetup -settime {0}'", ".", "format", "(", "dt_obj", ".", "strftime", "(", "'%H:%M:%S'", ")", ")", "return", "salt", ".", "utils", ".", "mac_utils", ".", "execute_return_success", "(", "cmd", ")" ]
sets the current time .
train
true
69
def del_job_files(job_paths):
    for path in job_paths:
        if (path and clip_path(path).lower().startswith(cfg.download_dir.get_path().lower())):
            remove_all(path, recursive=True)
[ "def", "del_job_files", "(", "job_paths", ")", ":", "for", "path", "in", "job_paths", ":", "if", "(", "path", "and", "clip_path", "(", "path", ")", ".", "lower", "(", ")", ".", "startswith", "(", "cfg", ".", "download_dir", ".", "get_path", "(", ")", ".", "lower", "(", ")", ")", ")", ":", "remove_all", "(", "path", ",", "recursive", "=", "True", ")" ]
remove files of each path in the list .
train
false
70
def get_discount_modules():
    return load_module_instances('SHUUP_DISCOUNT_MODULES', 'discount_module')
[ "def", "get_discount_modules", "(", ")", ":", "return", "load_module_instances", "(", "'SHUUP_DISCOUNT_MODULES'", ",", "'discount_module'", ")" ]
get a list of configured discount module instances .
train
false
73
def initialize_log_data(ids_bcs_added_field):
    log_data = {}
    for curr_key in ids_bcs_added_field.keys():
        base_key = ''
        if curr_key[0]:
            base_key += (curr_key[0] + ',')
        if curr_key[1]:
            base_key += (curr_key[1] + ',')
        base_key += ids_bcs_added_field[curr_key]
        log_data[base_key] = 0
    return log_data
[ "def", "initialize_log_data", "(", "ids_bcs_added_field", ")", ":", "log_data", "=", "{", "}", "for", "curr_key", "in", "ids_bcs_added_field", ".", "keys", "(", ")", ":", "base_key", "=", "''", "if", "curr_key", "[", "0", "]", ":", "base_key", "+=", "(", "curr_key", "[", "0", "]", "+", "','", ")", "if", "curr_key", "[", "1", "]", ":", "base_key", "+=", "(", "curr_key", "[", "1", "]", "+", "','", ")", "base_key", "+=", "ids_bcs_added_field", "[", "curr_key", "]", "log_data", "[", "base_key", "]", "=", "0", "return", "log_data" ]
initializes log data .
train
false
76
def is_sequence_of_strings(obj):
    if (not cbook.iterable(obj)):
        return False
    if ((not isinstance(obj, np.ndarray)) and cbook.is_string_like(obj)):
        return False
    for o in obj:
        if (not cbook.is_string_like(o)):
            return False
    return True
[ "def", "is_sequence_of_strings", "(", "obj", ")", ":", "if", "(", "not", "cbook", ".", "iterable", "(", "obj", ")", ")", ":", "return", "False", "if", "(", "(", "not", "isinstance", "(", "obj", ",", "np", ".", "ndarray", ")", ")", "and", "cbook", ".", "is_string_like", "(", "obj", ")", ")", ":", "return", "False", "for", "o", "in", "obj", ":", "if", "(", "not", "cbook", ".", "is_string_like", "(", "o", ")", ")", ":", "return", "False", "return", "True" ]
returns true if *obj* is iterable and contains strings .
train
false
77
def highlighting(view, name, style, left, right):
    if (left is not None):
        left = left.move((left.begin + BEGIN_LEN), (left.end - BRACKET_LEN))
    if (right is not None):
        right = right.move((right.begin + END_LEN), (right.end - BRACKET_LEN))
    return (left, right)
[ "def", "highlighting", "(", "view", ",", "name", ",", "style", ",", "left", ",", "right", ")", ":", "if", "(", "left", "is", "not", "None", ")", ":", "left", "=", "left", ".", "move", "(", "(", "left", ".", "begin", "+", "BEGIN_LEN", ")", ",", "(", "left", ".", "end", "-", "BRACKET_LEN", ")", ")", "if", "(", "right", "is", "not", "None", ")", ":", "right", "=", "right", ".", "move", "(", "(", "right", ".", "begin", "+", "END_LEN", ")", ",", "(", "right", ".", "end", "-", "BRACKET_LEN", ")", ")", "return", "(", "left", ",", "right", ")" ]
highlight only the tag name .
train
false
78
def _inFilesystemNamespace(path):
    return (path[:1] not in ('\x00', u'\x00'))
[ "def", "_inFilesystemNamespace", "(", "path", ")", ":", "return", "(", "path", "[", ":", "1", "]", "not", "in", "(", "'\\x00'", ",", "u'\\x00'", ")", ")" ]
determine whether the given unix socket path is in a filesystem namespace .
train
false
79
def set_policy(name, table='filter', family='ipv4', **kwargs):
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    for ignore in _STATE_INTERNAL_KEYWORDS:
        if (ignore in kwargs):
            del kwargs[ignore]
    if (__salt__['iptables.get_policy'](table, kwargs['chain'], family) == kwargs['policy']):
        ret['result'] = True
        ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} already set to {3}'.format(kwargs['chain'], table, family, kwargs['policy'])
        return ret
    if __opts__['test']:
        ret['comment'] = 'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'.format(kwargs['chain'], table, family, kwargs['policy'])
        return ret
    if (not __salt__['iptables.set_policy'](table, kwargs['chain'], kwargs['policy'], family)):
        ret['changes'] = {'locale': name}
        ret['result'] = True
        ret['comment'] = 'Set default policy for {0} to {1} family {2}'.format(kwargs['chain'], kwargs['policy'], family)
        if ('save' in kwargs):
            if kwargs['save']:
                __salt__['iptables.save'](filename=None, family=family)
                ret['comment'] = 'Set and saved default policy for {0} to {1} family {2}'.format(kwargs['chain'], kwargs['policy'], family)
        return ret
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to set iptables default policy'
        return ret
[ "def", "set_policy", "(", "name", ",", "table", "=", "'filter'", ",", "family", "=", "'ipv4'", ",", "**", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "for", "ignore", "in", "_STATE_INTERNAL_KEYWORDS", ":", "if", "(", "ignore", "in", "kwargs", ")", ":", "del", "kwargs", "[", "ignore", "]", "if", "(", "__salt__", "[", "'iptables.get_policy'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "family", ")", "==", "kwargs", "[", "'policy'", "]", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'iptables default policy for chain {0} on table {1} for {2} already set to {3}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "table", ",", "family", ",", "kwargs", "[", "'policy'", "]", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'iptables default policy for chain {0} on table {1} for {2} needs to be set to {3}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "table", ",", "family", ",", "kwargs", "[", "'policy'", "]", ")", "return", "ret", "if", "(", "not", "__salt__", "[", "'iptables.set_policy'", "]", "(", "table", ",", "kwargs", "[", "'chain'", "]", ",", "kwargs", "[", "'policy'", "]", ",", "family", ")", ")", ":", "ret", "[", "'changes'", "]", "=", "{", "'locale'", ":", "name", "}", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'Set default policy for {0} to {1} family {2}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "kwargs", "[", "'policy'", "]", ",", "family", ")", "if", "(", "'save'", "in", "kwargs", ")", ":", "if", "kwargs", "[", "'save'", "]", ":", "__salt__", "[", "'iptables.save'", "]", "(", "filename", "=", "None", ",", "family", "=", "family", ")", "ret", "[", "'comment'", "]", "=", "'Set and saved default policy for {0} to {1} family {2}'", ".", "format", "(", "kwargs", "[", "'chain'", "]", ",", "kwargs", "[", "'policy'", "]", ",", "family", ")", "return", "ret", "else", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to set iptables default policy'", "return", "ret" ]
set the current policy for the specified table/chain cli example: .
train
true
80
def test_formatters(Chart):
    if (Chart._dual or (Chart == Box)):
        return
    chart = Chart(formatter=(lambda x, chart, serie: ('%s%s$' % (x, serie.title))))
    chart.add('_a', [1, 2, {'value': 3, 'formatter': (lambda x: (u('%s\xc2\xa5') % x))}])
    chart.add('_b', [4, 5, 6], formatter=(lambda x: (u('%s\xe2\x82\xac') % x)))
    chart.x_labels = [2, 4, 6]
    chart.x_labels_major = [4]
    q = chart.render_pyquery()
    assert (set([v.text for v in q('.value')]) == set(((u('4\xe2\x82\xac'), u('5\xe2\x82\xac'), u('6\xe2\x82\xac'), '1_a$', '2_a$', u('3\xc2\xa5')) + (('6_a$', u('15\xe2\x82\xac')) if (Chart in (Pie, SolidGauge)) else ()))))
[ "def", "test_formatters", "(", "Chart", ")", ":", "if", "(", "Chart", ".", "_dual", "or", "(", "Chart", "==", "Box", ")", ")", ":", "return", "chart", "=", "Chart", "(", "formatter", "=", "(", "lambda", "x", ",", "chart", ",", "serie", ":", "(", "'%s%s$'", "%", "(", "x", ",", "serie", ".", "title", ")", ")", ")", ")", "chart", ".", "add", "(", "'_a'", ",", "[", "1", ",", "2", ",", "{", "'value'", ":", "3", ",", "'formatter'", ":", "(", "lambda", "x", ":", "(", "u", "(", "'%s\\xc2\\xa5'", ")", "%", "x", ")", ")", "}", "]", ")", "chart", ".", "add", "(", "'_b'", ",", "[", "4", ",", "5", ",", "6", "]", ",", "formatter", "=", "(", "lambda", "x", ":", "(", "u", "(", "'%s\\xe2\\x82\\xac'", ")", "%", "x", ")", ")", ")", "chart", ".", "x_labels", "=", "[", "2", ",", "4", ",", "6", "]", "chart", ".", "x_labels_major", "=", "[", "4", "]", "q", "=", "chart", ".", "render_pyquery", "(", ")", "assert", "(", "set", "(", "[", "v", ".", "text", "for", "v", "in", "q", "(", "'.value'", ")", "]", ")", "==", "set", "(", "(", "(", "u", "(", "'4\\xe2\\x82\\xac'", ")", ",", "u", "(", "'5\\xe2\\x82\\xac'", ")", ",", "u", "(", "'6\\xe2\\x82\\xac'", ")", ",", "'1_a$'", ",", "'2_a$'", ",", "u", "(", "'3\\xc2\\xa5'", ")", ")", "+", "(", "(", "'6_a$'", ",", "u", "(", "'15\\xe2\\x82\\xac'", ")", ")", "if", "(", "Chart", "in", "(", "Pie", ",", "SolidGauge", ")", ")", "else", "(", ")", ")", ")", ")", ")" ]
test custom formatters .
train
false
81
def test_sort():
    model = _create_model([[('B', '', '', 1), ('C', '', '', 2), ('A', '', '', 0)]])
    filter_model = sortfilter.CompletionFilterModel(model)
    filter_model.sort(0, Qt.AscendingOrder)
    actual = _extract_model_data(filter_model)
    assert (actual == [[('A', '', ''), ('B', '', ''), ('C', '', '')]])
    filter_model.sort(0, Qt.DescendingOrder)
    actual = _extract_model_data(filter_model)
    assert (actual == [[('C', '', ''), ('B', '', ''), ('A', '', '')]])
[ "def", "test_sort", "(", ")", ":", "model", "=", "_create_model", "(", "[", "[", "(", "'B'", ",", "''", ",", "''", ",", "1", ")", ",", "(", "'C'", ",", "''", ",", "''", ",", "2", ")", ",", "(", "'A'", ",", "''", ",", "''", ",", "0", ")", "]", "]", ")", "filter_model", "=", "sortfilter", ".", "CompletionFilterModel", "(", "model", ")", "filter_model", ".", "sort", "(", "0", ",", "Qt", ".", "AscendingOrder", ")", "actual", "=", "_extract_model_data", "(", "filter_model", ")", "assert", "(", "actual", "==", "[", "[", "(", "'A'", ",", "''", ",", "''", ")", ",", "(", "'B'", ",", "''", ",", "''", ")", ",", "(", "'C'", ",", "''", ",", "''", ")", "]", "]", ")", "filter_model", ".", "sort", "(", "0", ",", "Qt", ".", "DescendingOrder", ")", "actual", "=", "_extract_model_data", "(", "filter_model", ")", "assert", "(", "actual", "==", "[", "[", "(", "'C'", ",", "''", ",", "''", ")", ",", "(", "'B'", ",", "''", ",", "''", ")", ",", "(", "'A'", ",", "''", ",", "''", ")", "]", "]", ")" ]
ensure that a sort argument passed to sort overrides dumb_sort .
train
false
82
def create_dendrogram(X, orientation='bottom', labels=None, colorscale=None, distfun=None, linkagefun=(lambda x: sch.linkage(x, 'complete'))):
    if ((not scp) or (not scs) or (not sch)):
        raise ImportError('FigureFactory.create_dendrogram requires scipy, scipy.spatial and scipy.hierarchy')
    s = X.shape
    if (len(s) != 2):
        exceptions.PlotlyError('X should be 2-dimensional array.')
    if (distfun is None):
        distfun = scs.distance.pdist
    dendrogram = _Dendrogram(X, orientation, labels, colorscale, distfun=distfun, linkagefun=linkagefun)
    return {'layout': dendrogram.layout, 'data': dendrogram.data}
[ "def", "create_dendrogram", "(", "X", ",", "orientation", "=", "'bottom'", ",", "labels", "=", "None", ",", "colorscale", "=", "None", ",", "distfun", "=", "None", ",", "linkagefun", "=", "(", "lambda", "x", ":", "sch", ".", "linkage", "(", "x", ",", "'complete'", ")", ")", ")", ":", "if", "(", "(", "not", "scp", ")", "or", "(", "not", "scs", ")", "or", "(", "not", "sch", ")", ")", ":", "raise", "ImportError", "(", "'FigureFactory.create_dendrogram requires scipy, scipy.spatial and scipy.hierarchy'", ")", "s", "=", "X", ".", "shape", "if", "(", "len", "(", "s", ")", "!=", "2", ")", ":", "exceptions", ".", "PlotlyError", "(", "'X should be 2-dimensional array.'", ")", "if", "(", "distfun", "is", "None", ")", ":", "distfun", "=", "scs", ".", "distance", ".", "pdist", "dendrogram", "=", "_Dendrogram", "(", "X", ",", "orientation", ",", "labels", ",", "colorscale", ",", "distfun", "=", "distfun", ",", "linkagefun", "=", "linkagefun", ")", "return", "{", "'layout'", ":", "dendrogram", ".", "layout", ",", "'data'", ":", "dendrogram", ".", "data", "}" ]
beta function that returns a dendrogram plotly figure object .
train
false
83
def uni_print(statement, out_file=None):
    if (out_file is None):
        out_file = sys.stdout
    try:
        out_file.write(statement)
    except UnicodeEncodeError:
        new_encoding = getattr(out_file, 'encoding', 'ascii')
        if (new_encoding is None):
            new_encoding = 'ascii'
        new_statement = statement.encode(new_encoding, 'replace').decode(new_encoding)
        out_file.write(new_statement)
    out_file.flush()
[ "def", "uni_print", "(", "statement", ",", "out_file", "=", "None", ")", ":", "if", "(", "out_file", "is", "None", ")", ":", "out_file", "=", "sys", ".", "stdout", "try", ":", "out_file", ".", "write", "(", "statement", ")", "except", "UnicodeEncodeError", ":", "new_encoding", "=", "getattr", "(", "out_file", ",", "'encoding'", ",", "'ascii'", ")", "if", "(", "new_encoding", "is", "None", ")", ":", "new_encoding", "=", "'ascii'", "new_statement", "=", "statement", ".", "encode", "(", "new_encoding", ",", "'replace'", ")", ".", "decode", "(", "new_encoding", ")", "out_file", ".", "write", "(", "new_statement", ")", "out_file", ".", "flush", "(", ")" ]
this function is used to properly write unicode to a file .
train
false
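A minimal usage sketch for the snippet above, assuming uni_print is in scope; the ASCII-only wrapper below forces the UnicodeEncodeError fallback path, so the unencodable character is replaced rather than raised:

import io

raw = io.BytesIO()
# ascii-only text stream: writing non-ascii raises UnicodeEncodeError
ascii_out = io.TextIOWrapper(raw, encoding='ascii', newline='\n')
uni_print(u'caf\u00e9\n', ascii_out)  # falls back to 'replace' encoding
assert raw.getvalue() == b'caf?\n'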
84
def pretty_name(name):
    if (not name):
        return ''
    return name.replace('_', ' ').capitalize()
[ "def", "pretty_name", "(", "name", ")", ":", "if", "(", "not", "name", ")", ":", "return", "''", "return", "name", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "capitalize", "(", ")" ]
converts first_name to first name .
train
false
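A quick usage sketch, assuming the pretty_name above is in scope:

assert pretty_name('first_name') == 'First name'
assert pretty_name('') == ''
assert pretty_name(None) == ''  # any falsy value short-circuits to ''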
85
def list_snapshots(config='root'):
    try:
        snapshots = snapper.ListSnapshots(config)
        return [_snapshot_to_data(s) for s in snapshots]
    except dbus.DBusException as exc:
        raise CommandExecutionError('Error encountered while listing snapshots: {0}'.format(_dbus_exception_to_reason(exc, locals())))
[ "def", "list_snapshots", "(", "config", "=", "'root'", ")", ":", "try", ":", "snapshots", "=", "snapper", ".", "ListSnapshots", "(", "config", ")", "return", "[", "_snapshot_to_data", "(", "s", ")", "for", "s", "in", "snapshots", "]", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Error encountered while listing snapshots: {0}'", ".", "format", "(", "_dbus_exception_to_reason", "(", "exc", ",", "locals", "(", ")", ")", ")", ")" ]
list available snapshots for certain vm or for all .
train
true
86
def branch_list(repo):
    with open_repo_closing(repo) as r:
        return r.refs.keys(base='refs/heads/')
[ "def", "branch_list", "(", "repo", ")", ":", "with", "open_repo_closing", "(", "repo", ")", "as", "r", ":", "return", "r", ".", "refs", ".", "keys", "(", "base", "=", "'refs/heads/'", ")" ]
return a list of local or remote branches . this explicitly removes head from the list of remote branches .
train
false
87
@depends(HAS_PYVMOMI)
def get_ntp_config(host, username, password, protocol=None, port=None, host_names=None):
    service_instance = salt.utils.vmware.get_service_instance(host=host, username=username, password=password, protocol=protocol, port=port)
    host_names = _check_hosts(service_instance, host, host_names)
    ret = {}
    for host_name in host_names:
        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        ntp_config = host_ref.configManager.dateTimeSystem.dateTimeInfo.ntpConfig.server
        ret.update({host_name: ntp_config})
    return ret
[ "@", "depends", "(", "HAS_PYVMOMI", ")", "def", "get_ntp_config", "(", "host", ",", "username", ",", "password", ",", "protocol", "=", "None", ",", "port", "=", "None", ",", "host_names", "=", "None", ")", ":", "service_instance", "=", "salt", ".", "utils", ".", "vmware", ".", "get_service_instance", "(", "host", "=", "host", ",", "username", "=", "username", ",", "password", "=", "password", ",", "protocol", "=", "protocol", ",", "port", "=", "port", ")", "host_names", "=", "_check_hosts", "(", "service_instance", ",", "host", ",", "host_names", ")", "ret", "=", "{", "}", "for", "host_name", "in", "host_names", ":", "host_ref", "=", "_get_host_ref", "(", "service_instance", ",", "host", ",", "host_name", "=", "host_name", ")", "ntp_config", "=", "host_ref", ".", "configManager", ".", "dateTimeSystem", ".", "dateTimeInfo", ".", "ntpConfig", ".", "server", "ret", ".", "update", "(", "{", "host_name", ":", "ntp_config", "}", ")", "return", "ret" ]
get the ntp configuration information for a given host or list of host_names .
train
true
89
def floating_ip_list(call=None):
    if (call != 'function'):
        raise SaltCloudSystemExit('The floating_ip_list action must be called with -f or --function')
    conn = get_conn()
    return conn.floating_ip_list()
[ "def", "floating_ip_list", "(", "call", "=", "None", ")", ":", "if", "(", "call", "!=", "'function'", ")", ":", "raise", "SaltCloudSystemExit", "(", "'The floating_ip_list action must be called with -f or --function'", ")", "conn", "=", "get_conn", "(", ")", "return", "conn", ".", "floating_ip_list", "(", ")" ]
list floating ips .
train
false
90
def tsql_query(query, **kwargs):
    try:
        cur = _get_connection(**kwargs).cursor()
        cur.execute(query)
        return loads(_MssqlEncoder().encode({'resultset': cur.fetchall()}))['resultset']
    except Exception as e:
        return (('Could not run the query',), (str(e),))
[ "def", "tsql_query", "(", "query", ",", "**", "kwargs", ")", ":", "try", ":", "cur", "=", "_get_connection", "(", "**", "kwargs", ")", ".", "cursor", "(", ")", "cur", ".", "execute", "(", "query", ")", "return", "loads", "(", "_MssqlEncoder", "(", ")", ".", "encode", "(", "{", "'resultset'", ":", "cur", ".", "fetchall", "(", ")", "}", ")", ")", "[", "'resultset'", "]", "except", "Exception", "as", "e", ":", "return", "(", "(", "'Could not run the query'", ",", ")", ",", "(", "str", "(", "e", ")", ",", ")", ")" ]
run a sql query and return query result as list of tuples .
train
false
92
def per_cpu_times():
    ret = cext.per_cpu_times()
    return [scputimes(*x) for x in ret]
[ "def", "per_cpu_times", "(", ")", ":", "ret", "=", "cext", ".", "per_cpu_times", "(", ")", "return", "[", "scputimes", "(", "*", "x", ")", "for", "x", "in", "ret", "]" ]
return system per-cpu times as a list of named tuples .
train
false
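The snippet above is psutil's internal per-platform helper; the public entry point that wraps it is psutil.cpu_times(percpu=True). A usage sketch, assuming psutil is installed:

import psutil

# one scputimes named tuple per logical CPU
for t in psutil.cpu_times(percpu=True):
    print(t.user, t.system, t.idle)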
93
def _run_aws(cmd, region, opts, user, **kwargs):
    receipthandle = kwargs.pop('receipthandle', None)
    if receipthandle:
        kwargs['receipt-handle'] = receipthandle
    num = kwargs.pop('num', None)
    if num:
        kwargs['max-number-of-messages'] = num
    _formatted_args = ['--{0} "{1}"'.format(k, v) for (k, v) in six.iteritems(kwargs)]
    cmd = 'aws sqs {cmd} {args} {region} {out}'.format(cmd=cmd, args=' '.join(_formatted_args), region=_region(region), out=_OUTPUT)
    rtn = __salt__['cmd.run'](cmd, runas=user, python_shell=False)
    return (json.loads(rtn) if rtn else '')
[ "def", "_run_aws", "(", "cmd", ",", "region", ",", "opts", ",", "user", ",", "**", "kwargs", ")", ":", "receipthandle", "=", "kwargs", ".", "pop", "(", "'receipthandle'", ",", "None", ")", "if", "receipthandle", ":", "kwargs", "[", "'receipt-handle'", "]", "=", "receipthandle", "num", "=", "kwargs", ".", "pop", "(", "'num'", ",", "None", ")", "if", "num", ":", "kwargs", "[", "'max-number-of-messages'", "]", "=", "num", "_formatted_args", "=", "[", "'--{0} \"{1}\"'", ".", "format", "(", "k", ",", "v", ")", "for", "(", "k", ",", "v", ")", "in", "six", ".", "iteritems", "(", "kwargs", ")", "]", "cmd", "=", "'aws sqs {cmd} {args} {region} {out}'", ".", "format", "(", "cmd", "=", "cmd", ",", "args", "=", "' '", ".", "join", "(", "_formatted_args", ")", ",", "region", "=", "_region", "(", "region", ")", ",", "out", "=", "_OUTPUT", ")", "rtn", "=", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "runas", "=", "user", ",", "python_shell", "=", "False", ")", "return", "(", "json", ".", "loads", "(", "rtn", ")", "if", "rtn", "else", "''", ")" ]
runs the given command against aws .
train
true
94
def TrimmedMean(t, p=0.01):
    t = Trim(t, p)
    return Mean(t)
[ "def", "TrimmedMean", "(", "t", ",", "p", "=", "0.01", ")", ":", "t", "=", "Trim", "(", "t", ",", "p", ")", "return", "Mean", "(", "t", ")" ]
computes the trimmed mean of a sequence of numbers .
train
false
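TrimmedMean depends on the corpus's own Trim and Mean helpers; a self-contained sketch of the same idea, with hypothetical stand-ins for those helpers (not the originals):

def Trim(t, p=0.01):
    # drop the lowest and highest p fraction of the sorted values
    n = int(p * len(t))
    return sorted(t)[n:len(t) - n]

def Mean(t):
    return float(sum(t)) / len(t)

def TrimmedMean(t, p=0.01):
    t = Trim(t, p)
    return Mean(t)

# the outlier 100 is trimmed away before averaging
assert TrimmedMean([1, 2, 3, 4, 100], p=0.2) == 3.0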
95
def ePut(Handle, IOType, Channel, Value, x1):
    if (os.name == 'nt'):
        staticLib = ctypes.windll.LoadLibrary('labjackud')
        pv = ctypes.c_double(Value)
        ec = staticLib.ePut(Handle, IOType, Channel, pv, x1)
        if (ec != 0):
            raise LabJackException(ec)
    else:
        raise LabJackException(0, 'Function only supported for Windows')
[ "def", "ePut", "(", "Handle", ",", "IOType", ",", "Channel", ",", "Value", ",", "x1", ")", ":", "if", "(", "os", ".", "name", "==", "'nt'", ")", ":", "staticLib", "=", "ctypes", ".", "windll", ".", "LoadLibrary", "(", "'labjackud'", ")", "pv", "=", "ctypes", ".", "c_double", "(", "Value", ")", "ec", "=", "staticLib", ".", "ePut", "(", "Handle", ",", "IOType", ",", "Channel", ",", "pv", ",", "x1", ")", "if", "(", "ec", "!=", "0", ")", ":", "raise", "LabJackException", "(", "ec", ")", "else", ":", "raise", "LabJackException", "(", "0", ",", "'Function only supported for Windows'", ")" ]
put one value to the labjack device . eput is equivalent to an addrequest followed by a goone .
train
false
97
def local_binary_pattern(image, P, R, method='default'):
    assert_nD(image, 2)
    methods = {'default': ord('D'), 'ror': ord('R'), 'uniform': ord('U'), 'nri_uniform': ord('N'), 'var': ord('V')}
    image = np.ascontiguousarray(image, dtype=np.double)
    output = _local_binary_pattern(image, P, R, methods[method.lower()])
    return output
[ "def", "local_binary_pattern", "(", "image", ",", "P", ",", "R", ",", "method", "=", "'default'", ")", ":", "assert_nD", "(", "image", ",", "2", ")", "methods", "=", "{", "'default'", ":", "ord", "(", "'D'", ")", ",", "'ror'", ":", "ord", "(", "'R'", ")", ",", "'uniform'", ":", "ord", "(", "'U'", ")", ",", "'nri_uniform'", ":", "ord", "(", "'N'", ")", ",", "'var'", ":", "ord", "(", "'V'", ")", "}", "image", "=", "np", ".", "ascontiguousarray", "(", "image", ",", "dtype", "=", "np", ".", "double", ")", "output", "=", "_local_binary_pattern", "(", "image", ",", "P", ",", "R", ",", "methods", "[", "method", ".", "lower", "(", ")", "]", ")", "return", "output" ]
gray scale and rotation invariant lbp .
train
false
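The snippet is scikit-image's implementation; a usage sketch through the public API, assuming scikit-image and numpy are installed:

import numpy as np
from skimage.feature import local_binary_pattern

img = np.random.rand(16, 16)  # any 2-D grayscale image
# 8 neighbours on a circle of radius 1, rotation-invariant uniform codes
codes = local_binary_pattern(img, 8, 1, method='uniform')
assert codes.shape == img.shape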
98
def handleNewest(qry):
    try:
        getUserName()
    except:
        return _skypeError()
    qry = qry.decode('utf8')
    try:
        if ((':' in qry) and (qry.partition(':')[0] in map((lambda s: s[0]), _readFriends()))):
            return _sendMessageWait(qry)
        else:
            return _findNewest()
    except EnvironmentError:
        return PyFred('ch.xtin.skypingalfred.error', False).addItem('skypeupdate', 'skype update', 'No Skype Friends Found', 'Use skype update to cache friends!', True, 'update').toXML()
    except:
        return PyFred.GenericError()
[ "def", "handleNewest", "(", "qry", ")", ":", "try", ":", "getUserName", "(", ")", "except", ":", "return", "_skypeError", "(", ")", "qry", "=", "qry", ".", "decode", "(", "'utf8'", ")", "try", ":", "if", "(", "(", "':'", "in", "qry", ")", "and", "(", "qry", ".", "partition", "(", "':'", ")", "[", "0", "]", "in", "map", "(", "(", "lambda", "s", ":", "s", "[", "0", "]", ")", ",", "_readFriends", "(", ")", ")", ")", ")", ":", "return", "_sendMessageWait", "(", "qry", ")", "else", ":", "return", "_findNewest", "(", ")", "except", "EnvironmentError", ":", "return", "PyFred", "(", "'ch.xtin.skypingalfred.error'", ",", "False", ")", ".", "addItem", "(", "'skypeupdate'", ",", "'skype update'", ",", "'No Skype Friends Found'", ",", "'Use skype update to cache friends!'", ",", "True", ",", "'update'", ")", ".", "toXML", "(", ")", "except", ":", "return", "PyFred", ".", "GenericError", "(", ")" ]
gets the newest 5 messages .
train
false
99
@validate('tree')
def valid_field_in_tree(arch):
    return all(((child.tag in ('field', 'button')) for child in arch.xpath('/tree/*')))
[ "@", "validate", "(", "'tree'", ")", "def", "valid_field_in_tree", "(", "arch", ")", ":", "return", "all", "(", "(", "(", "child", ".", "tag", "in", "(", "'field'", ",", "'button'", ")", ")", "for", "child", "in", "arch", ".", "xpath", "(", "'/tree/*'", ")", ")", ")" ]
children of tree view must be field or button .
train
false
100
def cr_uid_ids(method):
    method._api = 'cr_uid_ids'
    return method
[ "def", "cr_uid_ids", "(", "method", ")", ":", "method", ".", "_api", "=", "'cr_uid_ids'", "return", "method" ]
decorate a traditional-style method that takes cr .
train
false
101
def issue_section(issue):
    labels = issue.get('labels', [])
    for label in labels:
        if (not label['name'].startswith('type: ')):
            continue
        if (label['name'] in LOG_SECTION):
            return LOG_SECTION[label['name']]
        elif (label['name'] in IGNORE_ISSUE_TYPE):
            return None
        else:
            logging.warn('unknown issue type: "{}" for: {}'.format(label['name'], issue_line(issue)))
    return None
[ "def", "issue_section", "(", "issue", ")", ":", "labels", "=", "issue", ".", "get", "(", "'labels'", ",", "[", "]", ")", "for", "label", "in", "labels", ":", "if", "(", "not", "label", "[", "'name'", "]", ".", "startswith", "(", "'type: '", ")", ")", ":", "continue", "if", "(", "label", "[", "'name'", "]", "in", "LOG_SECTION", ")", ":", "return", "LOG_SECTION", "[", "label", "[", "'name'", "]", "]", "elif", "(", "label", "[", "'name'", "]", "in", "IGNORE_ISSUE_TYPE", ")", ":", "return", "None", "else", ":", "logging", ".", "warn", "(", "'unknown issue type: \"{}\" for: {}'", ".", "format", "(", "label", "[", "'name'", "]", ",", "issue_line", "(", "issue", ")", ")", ")", "return", "None" ]
returns the section heading for the issue .
train
true
103
@transaction.non_atomic_requests
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@require_global_staff
@require_http_methods(['POST', 'DELETE'])
def certificate_invalidation_view(request, course_id):
    course_key = CourseKey.from_string(course_id)
    try:
        certificate_invalidation_data = parse_request_data(request)
        certificate = validate_request_data_and_get_certificate(certificate_invalidation_data, course_key)
    except ValueError as error:
        return JsonResponse({'message': error.message}, status=400)
    if (request.method == 'POST'):
        try:
            certificate_invalidation = invalidate_certificate(request, certificate, certificate_invalidation_data)
        except ValueError as error:
            return JsonResponse({'message': error.message}, status=400)
        return JsonResponse(certificate_invalidation)
    elif (request.method == 'DELETE'):
        try:
            re_validate_certificate(request, course_key, certificate)
        except ValueError as error:
            return JsonResponse({'message': error.message}, status=400)
        return JsonResponse({}, status=204)
[ "@", "transaction", ".", "non_atomic_requests", "@", "ensure_csrf_cookie", "@", "cache_control", "(", "no_cache", "=", "True", ",", "no_store", "=", "True", ",", "must_revalidate", "=", "True", ")", "@", "require_global_staff", "@", "require_http_methods", "(", "[", "'POST'", ",", "'DELETE'", "]", ")", "def", "certificate_invalidation_view", "(", "request", ",", "course_id", ")", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "try", ":", "certificate_invalidation_data", "=", "parse_request_data", "(", "request", ")", "certificate", "=", "validate_request_data_and_get_certificate", "(", "certificate_invalidation_data", ",", "course_key", ")", "except", "ValueError", "as", "error", ":", "return", "JsonResponse", "(", "{", "'message'", ":", "error", ".", "message", "}", ",", "status", "=", "400", ")", "if", "(", "request", ".", "method", "==", "'POST'", ")", ":", "try", ":", "certificate_invalidation", "=", "invalidate_certificate", "(", "request", ",", "certificate", ",", "certificate_invalidation_data", ")", "except", "ValueError", "as", "error", ":", "return", "JsonResponse", "(", "{", "'message'", ":", "error", ".", "message", "}", ",", "status", "=", "400", ")", "return", "JsonResponse", "(", "certificate_invalidation", ")", "elif", "(", "request", ".", "method", "==", "'DELETE'", ")", ":", "try", ":", "re_validate_certificate", "(", "request", ",", "course_key", ",", "certificate", ")", "except", "ValueError", "as", "error", ":", "return", "JsonResponse", "(", "{", "'message'", ":", "error", ".", "message", "}", ",", "status", "=", "400", ")", "return", "JsonResponse", "(", "{", "}", ",", "status", "=", "204", ")" ]
invalidate/re-validate students to/from certificate .
train
false
104
def rollback():
    connection._rollback()
    set_clean()
[ "def", "rollback", "(", ")", ":", "connection", ".", "_rollback", "(", ")", "set_clean", "(", ")" ]
rolls back a transaction .
train
false
105
def _mathdefault(s):
    if rcParams[u'_internal.classic_mode']:
        return (u'\\mathdefault{%s}' % s)
    else:
        return (u'{%s}' % s)
[ "def", "_mathdefault", "(", "s", ")", ":", "if", "rcParams", "[", "u'_internal.classic_mode'", "]", ":", "return", "(", "u'\\\\mathdefault{%s}'", "%", "s", ")", "else", ":", "return", "(", "u'{%s}'", "%", "s", ")" ]
for backward compatibility .
train
false
106
@requires_sklearn
def test_gat_plot_nonsquared():
    gat = _get_data(test_times=dict(start=0.0))
    gat.plot()
    ax = gat.plot_diagonal()
    scores = ax.get_children()[1].get_lines()[2].get_ydata()
    assert_equals(len(scores), len(gat.estimators_))
[ "@", "requires_sklearn", "def", "test_gat_plot_nonsquared", "(", ")", ":", "gat", "=", "_get_data", "(", "test_times", "=", "dict", "(", "start", "=", "0.0", ")", ")", "gat", ".", "plot", "(", ")", "ax", "=", "gat", ".", "plot_diagonal", "(", ")", "scores", "=", "ax", ".", "get_children", "(", ")", "[", "1", "]", ".", "get_lines", "(", ")", "[", "2", "]", ".", "get_ydata", "(", ")", "assert_equals", "(", "len", "(", "scores", ")", ",", "len", "(", "gat", ".", "estimators_", ")", ")" ]
test gat diagonal plot .
train
false
107
def _add_keys_to_request(request_field_pb, key_pbs):
    for key_pb in key_pbs:
        request_field_pb.add().CopyFrom(key_pb)
[ "def", "_add_keys_to_request", "(", "request_field_pb", ",", "key_pbs", ")", ":", "for", "key_pb", "in", "key_pbs", ":", "request_field_pb", ".", "add", "(", ")", ".", "CopyFrom", "(", "key_pb", ")" ]
add protobuf keys to a request object .
train
false
108
def execute_on_completion(application, config, callback):
    def inner(environ, start_response):
        try:
            result = application(environ, start_response)
        except:
            callback(environ)
            raise
        return generate_close_and_callback(result, callback, environ)
    return inner
[ "def", "execute_on_completion", "(", "application", ",", "config", ",", "callback", ")", ":", "def", "inner", "(", "environ", ",", "start_response", ")", ":", "try", ":", "result", "=", "application", "(", "environ", ",", "start_response", ")", "except", ":", "callback", "(", "environ", ")", "raise", "return", "generate_close_and_callback", "(", "result", ",", "callback", ",", "environ", ")", "return", "inner" ]
call callback once complete response is sent .
train
false
109
def qt5_qml_data(directory):
    qmldir = qt5_qml_dir()
    return (os.path.join(qmldir, directory), 'qml')
[ "def", "qt5_qml_data", "(", "directory", ")", ":", "qmldir", "=", "qt5_qml_dir", "(", ")", "return", "(", "os", ".", "path", ".", "join", "(", "qmldir", ",", "directory", ")", ",", "'qml'", ")" ]
return qml library directory formatted for data .
train
false
110
def require_finance_admin(func):
    def wrapped(request, course_id):
        try:
            course_key = CourseKey.from_string(course_id)
        except InvalidKeyError:
            log.error(u'Unable to find course with course key %s', course_id)
            return HttpResponseNotFound()
        access = auth.user_has_role(request.user, CourseFinanceAdminRole(course_key))
        if access:
            return func(request, course_id)
        else:
            return HttpResponseForbidden()
    return wrapped
[ "def", "require_finance_admin", "(", "func", ")", ":", "def", "wrapped", "(", "request", ",", "course_id", ")", ":", "try", ":", "course_key", "=", "CourseKey", ".", "from_string", "(", "course_id", ")", "except", "InvalidKeyError", ":", "log", ".", "error", "(", "u'Unable to find course with course key %s'", ",", "course_id", ")", "return", "HttpResponseNotFound", "(", ")", "access", "=", "auth", ".", "user_has_role", "(", "request", ".", "user", ",", "CourseFinanceAdminRole", "(", "course_key", ")", ")", "if", "access", ":", "return", "func", "(", "request", ",", "course_id", ")", "else", ":", "return", "HttpResponseForbidden", "(", ")", "return", "wrapped" ]
decorator for checking finance administrator access before executing an http endpoint .
train
false
111
def addBeginXMLTag(attributeDictionary, className, depth, output, text=''):
    depthStart = (' DCTB ' * depth)
    output.write(('%s<%s%s>%s\n' % (depthStart, className, getAttributeDictionaryString(attributeDictionary), text)))
[ "def", "addBeginXMLTag", "(", "attributeDictionary", ",", "className", ",", "depth", ",", "output", ",", "text", "=", "''", ")", ":", "depthStart", "=", "(", "' DCTB '", "*", "depth", ")", "output", ".", "write", "(", "(", "'%s<%s%s>%s\\n'", "%", "(", "depthStart", ",", "className", ",", "getAttributeDictionaryString", "(", "attributeDictionary", ")", ",", "text", ")", ")", ")" ]
add the begin xml tag .
train
false
113
def test_cons_list():
    entry = tokenize('(a . [])')[0]
    assert (entry == HyList([HySymbol('a')]))
    assert (type(entry) == HyList)
    entry = tokenize('(a . ())')[0]
    assert (entry == HyExpression([HySymbol('a')]))
    assert (type(entry) == HyExpression)
    entry = tokenize('(a b . {})')[0]
    assert (entry == HyDict([HySymbol('a'), HySymbol('b')]))
    assert (type(entry) == HyDict)
[ "def", "test_cons_list", "(", ")", ":", "entry", "=", "tokenize", "(", "'(a . [])'", ")", "[", "0", "]", "assert", "(", "entry", "==", "HyList", "(", "[", "HySymbol", "(", "'a'", ")", "]", ")", ")", "assert", "(", "type", "(", "entry", ")", "==", "HyList", ")", "entry", "=", "tokenize", "(", "'(a . ())'", ")", "[", "0", "]", "assert", "(", "entry", "==", "HyExpression", "(", "[", "HySymbol", "(", "'a'", ")", "]", ")", ")", "assert", "(", "type", "(", "entry", ")", "==", "HyExpression", ")", "entry", "=", "tokenize", "(", "'(a b . {})'", ")", "[", "0", "]", "assert", "(", "entry", "==", "HyDict", "(", "[", "HySymbol", "(", "'a'", ")", ",", "HySymbol", "(", "'b'", ")", "]", ")", ")", "assert", "(", "type", "(", "entry", ")", "==", "HyDict", ")" ]
check that cons of something and a list gets tokenized as a list .
train
false
114
def _wait_for_step(emr_connection, step, jobflowid, sleeptime):
    sleep(180)
    start = time()
    step_state = get_step_state(emr_connection, jobflowid, step.name, update=True)
    while (step_state in (LIVE_STATES + [PENDING])):
        sleep(sleeptime)
        step_state = get_step_state(emr_connection, jobflowid, step.name)
    end = time()
    print ('%s took %0.2fs (exit: %s)' % (step.name, (end - start), step_state))
    return step_state
[ "def", "_wait_for_step", "(", "emr_connection", ",", "step", ",", "jobflowid", ",", "sleeptime", ")", ":", "sleep", "(", "180", ")", "start", "=", "time", "(", ")", "step_state", "=", "get_step_state", "(", "emr_connection", ",", "jobflowid", ",", "step", ".", "name", ",", "update", "=", "True", ")", "while", "(", "step_state", "in", "(", "LIVE_STATES", "+", "[", "PENDING", "]", ")", ")", ":", "sleep", "(", "sleeptime", ")", "step_state", "=", "get_step_state", "(", "emr_connection", ",", "jobflowid", ",", "step", ".", "name", ")", "end", "=", "time", "(", ")", "print", "(", "'%s took %0.2fs (exit: %s)'", "%", "(", "step", ".", "name", ",", "(", "end", "-", "start", ")", ",", "step_state", ")", ")", "return", "step_state" ]
poll emr and wait for a step to finish .
train
false
116
def get_occupied_streams(realm):
    subs_filter = Subscription.objects.filter(active=True, user_profile__realm=realm, user_profile__is_active=True).values('recipient_id')
    stream_ids = Recipient.objects.filter(type=Recipient.STREAM, id__in=subs_filter).values('type_id')
    return Stream.objects.filter(id__in=stream_ids, realm=realm, deactivated=False)
[ "def", "get_occupied_streams", "(", "realm", ")", ":", "subs_filter", "=", "Subscription", ".", "objects", ".", "filter", "(", "active", "=", "True", ",", "user_profile__realm", "=", "realm", ",", "user_profile__is_active", "=", "True", ")", ".", "values", "(", "'recipient_id'", ")", "stream_ids", "=", "Recipient", ".", "objects", ".", "filter", "(", "type", "=", "Recipient", ".", "STREAM", ",", "id__in", "=", "subs_filter", ")", ".", "values", "(", "'type_id'", ")", "return", "Stream", ".", "objects", ".", "filter", "(", "id__in", "=", "stream_ids", ",", "realm", "=", "realm", ",", "deactivated", "=", "False", ")" ]
get streams with subscribers .
train
false
117
def test_find_number_6():
    s = 'query1e5 not found'
    r = find_number(s)
    assert (s[r[0]:r[1]] == '1e5')
[ "def", "test_find_number_6", "(", ")", ":", "s", "=", "'query1e5 not found'", "r", "=", "find_number", "(", "s", ")", "assert", "(", "s", "[", "r", "[", "0", "]", ":", "r", "[", "1", "]", "]", "==", "'1e5'", ")" ]
tests that we find numbers with exponents .
train
false
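find_number is internal to the test's codebase; a hypothetical regex stand-in that satisfies the same test, returning the (start, end) span of the first number it finds:

import re

def find_number(s):
    # stand-in: span of the first integer, decimal, or exponent-form number
    m = re.search(r'\d+(?:\.\d+)?(?:e\d+)?', s)
    return (m.start(), m.end()) if m else None

s = 'query1e5 not found'
r = find_number(s)
assert s[r[0]:r[1]] == '1e5'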
118
def _build_match_rule(action, target, pluralized):
    match_rule = policy.RuleCheck('rule', action)
    (resource, enforce_attr_based_check) = get_resource_and_action(action, pluralized)
    if enforce_attr_based_check:
        res_map = attributes.RESOURCE_ATTRIBUTE_MAP
        if (resource in res_map):
            for attribute_name in res_map[resource]:
                if _is_attribute_explicitly_set(attribute_name, res_map[resource], target, action):
                    attribute = res_map[resource][attribute_name]
                    if ('enforce_policy' in attribute):
                        attr_rule = policy.RuleCheck('rule', ('%s:%s' % (action, attribute_name)))
                        if _should_validate_sub_attributes(attribute, target[attribute_name]):
                            attr_rule = policy.AndCheck([attr_rule, _build_subattr_match_rule(attribute_name, attribute, action, target)])
                        match_rule = policy.AndCheck([match_rule, attr_rule])
    return match_rule
[ "def", "_build_match_rule", "(", "action", ",", "target", ",", "pluralized", ")", ":", "match_rule", "=", "policy", ".", "RuleCheck", "(", "'rule'", ",", "action", ")", "(", "resource", ",", "enforce_attr_based_check", ")", "=", "get_resource_and_action", "(", "action", ",", "pluralized", ")", "if", "enforce_attr_based_check", ":", "res_map", "=", "attributes", ".", "RESOURCE_ATTRIBUTE_MAP", "if", "(", "resource", "in", "res_map", ")", ":", "for", "attribute_name", "in", "res_map", "[", "resource", "]", ":", "if", "_is_attribute_explicitly_set", "(", "attribute_name", ",", "res_map", "[", "resource", "]", ",", "target", ",", "action", ")", ":", "attribute", "=", "res_map", "[", "resource", "]", "[", "attribute_name", "]", "if", "(", "'enforce_policy'", "in", "attribute", ")", ":", "attr_rule", "=", "policy", ".", "RuleCheck", "(", "'rule'", ",", "(", "'%s:%s'", "%", "(", "action", ",", "attribute_name", ")", ")", ")", "if", "_should_validate_sub_attributes", "(", "attribute", ",", "target", "[", "attribute_name", "]", ")", ":", "attr_rule", "=", "policy", ".", "AndCheck", "(", "[", "attr_rule", ",", "_build_subattr_match_rule", "(", "attribute_name", ",", "attribute", ",", "action", ",", "target", ")", "]", ")", "match_rule", "=", "policy", ".", "AndCheck", "(", "[", "match_rule", ",", "attr_rule", "]", ")", "return", "match_rule" ]
create the rule to match for a given action .
train
false
119
def dmp_ground_LC(f, u, K):
    while u:
        f = dmp_LC(f, K)
        u -= 1
    return dup_LC(f, K)
[ "def", "dmp_ground_LC", "(", "f", ",", "u", ",", "K", ")", ":", "while", "u", ":", "f", "=", "dmp_LC", "(", "f", ",", "K", ")", "u", "-=", "1", "return", "dup_LC", "(", "f", ",", "K", ")" ]
return the ground leading coefficient .
train
false
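dmp_ground_LC peels off the leading coefficient one variable level at a time until a ground element remains. A tiny sketch with sympy-style dense lists, using simplified stand-ins for dmp_LC/dup_LC (both just take the first element here):

def dmp_LC(f, K):
    return f[0]

def dup_LC(f, K):
    return f[0]

def dmp_ground_LC(f, u, K):
    while u:
        f = dmp_LC(f, K)
        u -= 1
    return dup_LC(f, K)

# f = (3*y + 1)*x + (y + 2), two variables so u = 1
f = [[3, 1], [1, 2]]
assert dmp_ground_LC(f, 1, None) == 3  # ground leading coefficient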
120
def getNewRepository():
    return ExportRepository()
[ "def", "getNewRepository", "(", ")", ":", "return", "ExportRepository", "(", ")" ]
get new repository .
train
false
122
def get_entrance_exam_score(request, course):
    exam_key = UsageKey.from_string(course.entrance_exam_id)
    exam_descriptor = modulestore().get_item(exam_key)

    def inner_get_module(descriptor):
        '\n        Delegate to get_module_for_descriptor (imported here to avoid circular reference)\n        '
        from courseware.module_render import get_module_for_descriptor
        field_data_cache = FieldDataCache([descriptor], course.id, request.user)
        return get_module_for_descriptor(request.user, request, descriptor, field_data_cache, course.id, course=course)
    exam_module_generators = yield_dynamic_descriptor_descendants(exam_descriptor, request.user.id, inner_get_module)
    exam_modules = [module for module in exam_module_generators]
    return _calculate_entrance_exam_score(request.user, course, exam_modules)
[ "def", "get_entrance_exam_score", "(", "request", ",", "course", ")", ":", "exam_key", "=", "UsageKey", ".", "from_string", "(", "course", ".", "entrance_exam_id", ")", "exam_descriptor", "=", "modulestore", "(", ")", ".", "get_item", "(", "exam_key", ")", "def", "inner_get_module", "(", "descriptor", ")", ":", "from", "courseware", ".", "module_render", "import", "get_module_for_descriptor", "field_data_cache", "=", "FieldDataCache", "(", "[", "descriptor", "]", ",", "course", ".", "id", ",", "request", ".", "user", ")", "return", "get_module_for_descriptor", "(", "request", ".", "user", ",", "request", ",", "descriptor", ",", "field_data_cache", ",", "course", ".", "id", ",", "course", "=", "course", ")", "exam_module_generators", "=", "yield_dynamic_descriptor_descendants", "(", "exam_descriptor", ",", "request", ".", "user", ".", "id", ",", "inner_get_module", ")", "exam_modules", "=", "[", "module", "for", "module", "in", "exam_module_generators", "]", "return", "_calculate_entrance_exam_score", "(", "request", ".", "user", ",", "course", ",", "exam_modules", ")" ]
gather the set of modules which comprise the entrance exam . note that request may not actually be a genuine request .
train
false
124
def load_passphrase_from_file():
    vf_path = os.path.expanduser(kVFPassphraseFile)
    assert (os.access(vf_path, os.F_OK) and os.access(vf_path, os.R_OK)), ('%s must exist and be readable' % vf_path)
    with open(vf_path) as f:
        user_data = f.read()
    return user_data.strip('\n')
[ "def", "load_passphrase_from_file", "(", ")", ":", "vf_path", "=", "os", ".", "path", ".", "expanduser", "(", "kVFPassphraseFile", ")", "assert", "(", "os", ".", "access", "(", "vf_path", ",", "os", ".", "F_OK", ")", "and", "os", ".", "access", "(", "vf_path", ",", "os", ".", "R_OK", ")", ")", ",", "(", "'%s must exist and be readable'", "%", "vf_path", ")", "with", "open", "(", "vf_path", ")", "as", "f", ":", "user_data", "=", "f", ".", "read", "(", ")", "return", "user_data", ".", "strip", "(", "'\\n'", ")" ]
read the viewfinder passphrase from local file .
train
false
125
def describe_identity_pools(IdentityPoolName, IdentityPoolId=None, region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        ids = _find_identity_pool_ids(IdentityPoolName, IdentityPoolId, conn)
        if ids:
            results = []
            for pool_id in ids:
                response = conn.describe_identity_pool(IdentityPoolId=pool_id)
                response.pop('ResponseMetadata', None)
                results.append(response)
            return {'identity_pools': results}
        else:
            return {'identity_pools': None}
    except ClientError as e:
        return {'error': salt.utils.boto3.get_error(e)}
[ "def", "describe_identity_pools", "(", "IdentityPoolName", ",", "IdentityPoolId", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "try", ":", "ids", "=", "_find_identity_pool_ids", "(", "IdentityPoolName", ",", "IdentityPoolId", ",", "conn", ")", "if", "ids", ":", "results", "=", "[", "]", "for", "pool_id", "in", "ids", ":", "response", "=", "conn", ".", "describe_identity_pool", "(", "IdentityPoolId", "=", "pool_id", ")", "response", ".", "pop", "(", "'ResponseMetadata'", ",", "None", ")", "results", ".", "append", "(", "response", ")", "return", "{", "'identity_pools'", ":", "results", "}", "else", ":", "return", "{", "'identity_pools'", ":", "None", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto3", ".", "get_error", "(", "e", ")", "}" ]
given an identity pool name .
train
false
126
@register.simple_tag(takes_context=True)
def zinnia_loop_template(context, default_template):
    (matching, context_object) = get_context_first_matching_object(context, ['category', 'tag', 'author', 'pattern', 'year', 'month', 'week', 'day'])
    context_positions = get_context_loop_positions(context)
    templates = loop_template_list(context_positions, context_object, matching, default_template, ENTRY_LOOP_TEMPLATES)
    return select_template(templates)
[ "@", "register", ".", "simple_tag", "(", "takes_context", "=", "True", ")", "def", "zinnia_loop_template", "(", "context", ",", "default_template", ")", ":", "(", "matching", ",", "context_object", ")", "=", "get_context_first_matching_object", "(", "context", ",", "[", "'category'", ",", "'tag'", ",", "'author'", ",", "'pattern'", ",", "'year'", ",", "'month'", ",", "'week'", ",", "'day'", "]", ")", "context_positions", "=", "get_context_loop_positions", "(", "context", ")", "templates", "=", "loop_template_list", "(", "context_positions", ",", "context_object", ",", "matching", ",", "default_template", ",", "ENTRY_LOOP_TEMPLATES", ")", "return", "select_template", "(", "templates", ")" ]
return a selected template from its position within a loop and the filtering context .
train
false
127
def loadExperimentDescriptionScriptFromDir(experimentDir):
    descriptionScriptPath = os.path.join(experimentDir, 'description.py')
    module = _loadDescriptionFile(descriptionScriptPath)
    return module
[ "def", "loadExperimentDescriptionScriptFromDir", "(", "experimentDir", ")", ":", "descriptionScriptPath", "=", "os", ".", "path", ".", "join", "(", "experimentDir", ",", "'description.py'", ")", "module", "=", "_loadDescriptionFile", "(", "descriptionScriptPath", ")", "return", "module" ]
loads the experiment description python script from the given experiment directory .
train
true
128
def token_list_to_text(tokenlist):
    ZeroWidthEscape = Token.ZeroWidthEscape
    return u''.join((item[1] for item in tokenlist if (item[0] != ZeroWidthEscape)))
[ "def", "token_list_to_text", "(", "tokenlist", ")", ":", "ZeroWidthEscape", "=", "Token", ".", "ZeroWidthEscape", "return", "u''", ".", "join", "(", "(", "item", "[", "1", "]", "for", "item", "in", "tokenlist", "if", "(", "item", "[", "0", "]", "!=", "ZeroWidthEscape", ")", ")", ")" ]
concatenate all the text parts again .
train
true
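A small sketch of the intended behaviour, assuming prompt_toolkit-style (token, text) pairs; ZERO_WIDTH_ESCAPE below is a hypothetical stand-in for Token.ZeroWidthEscape:

ZERO_WIDTH_ESCAPE = object()  # stand-in marker for zero-width escape tokens

def token_list_to_text(tokenlist):
    # keep the text of every pair except zero-width escapes
    return u''.join(text for (token, text) in tokenlist if token is not ZERO_WIDTH_ESCAPE)

tokens = [('Token.Prompt', u'>>> '), (ZERO_WIDTH_ESCAPE, u'\x1b]0;title\x07'), ('Token.Text', u'print(1)')]
assert token_list_to_text(tokens) == u'>>> print(1)'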
129
@profiler.trace
def transfer_list(request, detailed=True, search_opts=None):
    c_client = cinderclient(request)
    try:
        return [VolumeTransfer(v) for v in c_client.transfers.list(detailed=detailed, search_opts=search_opts)]
    except cinder_exception.Forbidden as error:
        LOG.error(error)
        return []
[ "@", "profiler", ".", "trace", "def", "transfer_list", "(", "request", ",", "detailed", "=", "True", ",", "search_opts", "=", "None", ")", ":", "c_client", "=", "cinderclient", "(", "request", ")", "try", ":", "return", "[", "VolumeTransfer", "(", "v", ")", "for", "v", "in", "c_client", ".", "transfers", ".", "list", "(", "detailed", "=", "detailed", ",", "search_opts", "=", "search_opts", ")", "]", "except", "cinder_exception", ".", "Forbidden", "as", "error", ":", "LOG", ".", "error", "(", "error", ")", "return", "[", "]" ]
to see all volumes transfers as an admin pass in a special search option: {all_tenants: 1} .
train
true
131
def test_discretize_callable_1d():
    def f(x):
        return (x ** 2)
    y = discretize_model(f, ((-5), 6))
    assert_allclose(y, (np.arange((-5), 6) ** 2))
[ "def", "test_discretize_callable_1d", "(", ")", ":", "def", "f", "(", "x", ")", ":", "return", "(", "x", "**", "2", ")", "y", "=", "discretize_model", "(", "f", ",", "(", "(", "-", "5", ")", ",", "6", ")", ")", "assert_allclose", "(", "y", ",", "(", "np", ".", "arange", "(", "(", "-", "5", ")", ",", "6", ")", "**", "2", ")", ")" ]
test discretize when a 1d function is passed .
train
false

Dataset Card for "python-150_interduplication"

More Information needed
