Column summary (dtype, then min/max of the values or of the string lengths):

| column | dtype | min | max |
| --- | --- | --- | --- |
| Unnamed: 0 | int64 | 0 | 10k |
| repository_name | string (length) | 7 | 54 |
| func_path_in_repository | string (length) | 5 | 223 |
| func_name | string (length) | 1 | 134 |
| whole_func_string | string (length) | 100 | 30.3k |
| language | string (1 distinct value) | | |
| func_code_string | string (length) | 100 | 30.3k |
| func_code_tokens | string (length) | 138 | 33.2k |
| func_documentation_string | string (length) | 1 | 15k |
| func_documentation_tokens | string (length) | 5 | 5.14k |
| split_name | string (1 distinct value) | | |
| func_code_url | string (length) | 91 | 315 |
Row 1,200 · lexich/yandex-disk-webdav · yandexwebdav.py · Config.downloadTo (python)

```python
def downloadTo(self, href, localpath):
    """
    Download file to localstorage
    :param href: remote path
    :param localpath: local path
    :return: response
    """
    for iTry in range(TRYINGS):
        logger.info(u("downloadTo(%s): %s %s") % (iTry, href, localpath))
        try:
            href = remote(href)
            localpath = _(localpath)
            conn = self.getConnection()
            conn.request("GET", _encode_utf8(href), "", self.getHeaders())
            response = conn.getresponse()
            checkResponse(response)
            f = None
            try:
                while True:
                    data = _decode_utf8(response.read(1024))
                    if not data:
                        break
                    if data == u('resource not found'):
                        return False
                    if not f:
                        f = open(localpath, "w")
                    f.write(data)
            finally:
                if f:
                    f.close()
            return True
        except ConnectionException:
            raise
        except Exception:
            e = sys.exc_info()[1]
            logger.exception(e)
```
Split: train · Source: https://github.com/lexich/yandex-disk-webdav/blob/669f51f999ed14e137454b90e7d035e2ca171c75/yandexwebdav.py#L362-L398
Row 1,201 · rwl/pylon · pylon/ac_pf.py · FastDecoupledPF._p_iteration (python)

```python
def _p_iteration(self, P, Bp_solver, Vm, Va, pvpq):
    """ Performs a P iteration, updates Va. """
    dVa = -Bp_solver.solve(P)

    # Update voltage.
    Va[pvpq] = Va[pvpq] + dVa
    V = Vm * exp(1j * Va)

    return V, Vm, Va
```
Split: train · Source: https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/ac_pf.py#L479-L488
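The update rule here is a polar-form voltage rebuild. A minimal NumPy sketch with made-up magnitudes, angles, and angle corrections; only the update `Va[pvpq] += dVa; V = Vm * exp(1j * Va)` is taken from the function above:

```python
import numpy as np

# Illustrative values: magnitudes and angles at three buses, with an
# angle correction at the two non-slack (PV/PQ) buses. Numbers are made up.
Vm = np.array([1.00, 0.98, 1.02])   # per-unit voltage magnitudes
Va = np.array([0.00, -0.05, 0.03])  # voltage angles in radians
pvpq = np.array([1, 2])             # indices of PV/PQ buses
dVa = np.array([0.01, -0.02])       # correction from the B' solve

Va[pvpq] = Va[pvpq] + dVa           # update angles at PV/PQ buses only
V = Vm * np.exp(1j * Va)            # rebuild complex voltages in polar form
print(V)
```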
Row 1,202 · mila/pyoo · pyoo.py · Chart.change_type (python)

```python
def change_type(self, cls):
    """
    Change type of diagram in this chart.

    Accepts one of classes which extend Diagram.
    """
    target_type = cls._type
    target = self._embedded.createInstance(target_type)
    self._embedded.setDiagram(target)
    return cls(target)
```
Split: train · Source: https://github.com/mila/pyoo/blob/1e024999f608c87ea72cd443e39c89eb0ba3cc62/pyoo.py#L824-L833
Row 1,203 · the-tale/pynames · pynames/from_tables_generator.py · FromCSVTablesGenerator.source_loader (python)

```python
def source_loader(self, source_paths, create_missing_tables=True):
    """Load source from 3 csv files.

    First file should contain global settings:

    * ``native_lagnauge,languages`` header on first row
    * appropriate values on following rows

    Example::

        native_lagnauge,languages
        ru,ru
        ,en

    Second file should contain templates:

    * ``template_name,probability,genders,template`` header on first row
    * appropriate values on following rows (separate values with semicolon ";"
      in template column)

    Example::

        template_name,probability,genders,template
        male_1,5,m,prefixes;male_suffixes
        baby_1,1,m;f,prefixes;descriptive

    Third file should contain tables with values for template slugs in all
    languages:

    * first row should contain slugs with language code after colon for each
    * appropriate values on following rows. Multiple forms may be specified
      using semicolon as separator

    Example::

        prefixes:ru,prefixes:en,male_suffixes:ru,male_suffixes:en,descriptive:ru,descriptive:en
        Бж,Bzh,пра,pra,быстряк;быстряку,fasty
        дон;дону,don,Иван;Ивану,Ivan,Иванов;Иванову,Ivanov

    Note: you may use slugs without ":lang_code" suffix in csv header of
    tables file. Such headers will be treated as headers for native language.

    If tables are missing for some slug then it is automatically created with
    values equal to slug itself. So you may use some slugs without specifying
    tables data for them. Example for apostrophe and space:

        male_1,5,m,prefixes;';male_suffixes
        male_full,5,m,first_name; ;last_name
    """
    if not isinstance(source_paths, Iterable) or len(source_paths) < 3:
        raise TypeError('FromCSVTablesGenerator.source_loader accepts list of 3 paths as argument. Got `%s` instead' % source_paths)

    self.native_language = ''
    self.languages = []
    self.templates = []
    self.tables = {}

    self.load_settings(source_paths[0])
    template_slugs = self.load_templates(source_paths[1])
    self.load_tables(source_paths[2])

    if create_missing_tables:
        self.create_missing_tables(template_slugs)

    self.full_forms_for_languages = set()
```
Split: train · Source: https://github.com/the-tale/pynames/blob/da45eaaac3166847bcb2c48cab4571a462660ace/pynames/from_tables_generator.py#L179-L238
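A sketch of the three input files the docstring describes, built with the stdlib csv module. The file names are hypothetical, and the final call is only indicated in a comment since the generator's construction is not shown here:

```python
import csv

# Hypothetical file names; the layouts follow the docstring above.
with open('settings.csv', 'w', newline='', encoding='utf-8') as f:
    csv.writer(f).writerows([
        ['native_lagnauge', 'languages'],  # header literally as documented
        ['ru', 'ru'],
        ['', 'en'],
    ])

with open('templates.csv', 'w', newline='', encoding='utf-8') as f:
    csv.writer(f).writerows([
        ['template_name', 'probability', 'genders', 'template'],
        ['male_1', '5', 'm', 'prefixes;male_suffixes'],
    ])

with open('tables.csv', 'w', newline='', encoding='utf-8') as f:
    csv.writer(f).writerows([
        ['prefixes:ru', 'prefixes:en', 'male_suffixes:ru', 'male_suffixes:en'],
        ['Бж', 'Bzh', 'пра', 'pra'],
    ])

# source_loader would then be called with the three paths in this order:
# generator.source_loader(['settings.csv', 'templates.csv', 'tables.csv'])
```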
Row 1,204 · daniellawrence/graphitesend · graphitesend/graphitesend.py · GraphiteClient.autoreconnect (python)

```python
def autoreconnect(self, sleep=1, attempt=3, exponential=True, jitter=5):
    """
    Tries to reconnect with some delay:

    exponential=False: up to `attempt` times with `sleep` seconds between
    each try

    exponential=True: up to `attempt` times with exponential growing `sleep`
    and random delay in range 1..`jitter` (exponential backoff)

    :param sleep: time to sleep between two attempts to reconnect
    :type sleep: float or int
    :param attempt: maximal number of attempts
    :type attempt: int
    :param exponential: if set - use exponential backoff logic
    :type exponential: bool
    :param jitter: top value of random delay, sec
    :type jitter: int
    """
    p = 0
    while attempt is None or attempt > 0:
        try:
            self.reconnect()
            return True
        except GraphiteSendException:
            if exponential:
                p += 1
                time.sleep(pow(sleep, p) + random.randint(1, jitter))
            else:
                time.sleep(sleep)
            attempt -= 1
    return False
```
Split: train · Source: https://github.com/daniellawrence/graphitesend/blob/02281263e642f9b6e146886d4544e1d7aebd7753/graphitesend/graphitesend.py#L175-L213
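A standalone sketch of the delay schedule produced by the exponential branch, with illustrative parameter values:

```python
import random

# Reproduce the exponential-branch delay: pow(sleep, p) grows with each
# failed attempt p, plus 1..jitter seconds of random noise.
sleep, jitter = 2, 5
for p in range(1, 5):
    delay = pow(sleep, p) + random.randint(1, jitter)
    print('attempt %d: sleep %ds' % (p, delay))
# roughly 2+j, 4+j, 8+j, 16+j seconds
```

Note that with the default `sleep=1`, `pow(sleep, p)` stays at 1 for every `p`, so the delay only actually grows when `sleep > 1`.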
Row 1,205 · tBaxter/tango-comments · build/lib/tango_comments/forms.py · CommentSecurityForm.generate_security_hash (python)

```python
def generate_security_hash(self, content_type, object_pk, timestamp):
    """
    Generate a HMAC security hash from the provided info.
    """
    info = (content_type, object_pk, timestamp)
    key_salt = "django.contrib.forms.CommentSecurityForm"
    value = "-".join(info)
    return salted_hmac(key_salt, value).hexdigest()
```
Split: train · Source: https://github.com/tBaxter/tango-comments/blob/1fd335c6fc9e81bba158e42e1483f1a149622ab4/build/lib/tango_comments/forms.py#L86-L93
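A stdlib-only sketch of the salted-HMAC idea: hash the salt together with a secret to derive a key, then HMAC the value. Django's `salted_hmac` derives its key differently, so this is illustrative rather than wire-compatible, and the secret and info tuple are made up:

```python
import hashlib
import hmac

key_salt = "django.contrib.forms.CommentSecurityForm"
secret = "not-a-real-secret"  # assumption: stand-in for a settings SECRET_KEY
value = "-".join(("comments.comment", "42", "1700000000"))  # made-up info

# Derive a fixed-length key from salt + secret, then MAC the value.
key = hashlib.sha256((key_salt + secret).encode()).digest()
digest = hmac.new(key, value.encode(), hashlib.sha256).hexdigest()
print(digest)
```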
Row 1,206 · mitsei/dlkit · dlkit/json_/repository/objects.py · AssetContentForm.clear_url (python)

```python
def clear_url(self):
    """Removes the url.

    raise:  NoAccess - ``Metadata.isRequired()`` is ``true`` or
            ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.repository.AssetContentForm.clear_url_template
    if (self.get_url_metadata().is_read_only() or
            self.get_url_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['url'] = self._url_default
```
Split: train · Source: https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1727-L1739
Row 1,207 · pypa/pipenv · pipenv/vendor/urllib3/util/ssl_.py · is_ipaddress (python)

```python
def is_ipaddress(hostname):
    """Detects whether the hostname given is an IP address.

    :param str hostname: Hostname to examine.
    :return: True if the hostname is an IP address, False otherwise.
    """
    if six.PY3 and isinstance(hostname, bytes):
        # IDN A-label bytes are ASCII compatible.
        hostname = hostname.decode('ascii')

    families = [socket.AF_INET]
    if hasattr(socket, 'AF_INET6'):
        families.append(socket.AF_INET6)

    for af in families:
        try:
            inet_pton(af, hostname)
        except (socket.error, ValueError, OSError):
            pass
        else:
            return True
    return False
```
Split: train · Source: https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/ssl_.py#L360-L381
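The validation work is done by `inet_pton`, which raises on anything that is not a well-formed address. A runnable sketch of the same probe over both families, with made-up candidates:

```python
import socket

for candidate in ('127.0.0.1', '::1', 'example.com'):
    try:
        socket.inet_pton(socket.AF_INET, candidate)
        print(candidate, '-> IPv4')
    except (OSError, ValueError):
        try:
            socket.inet_pton(socket.AF_INET6, candidate)
            print(candidate, '-> IPv6')
        except (OSError, ValueError):
            print(candidate, '-> not an IP address')
```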
Row 1,208 · Carbonara-Project/Guanciale · guanciale/idblib.py · idaunpack (python)

```python
def idaunpack(buf):
    """
    Special data packing format, used in struct definitions, and .id2 files

    sdk functions: pack_dd etc.
    """
    buf = bytearray(buf)

    def nextval(o):
        val = buf[o]; o += 1
        if val == 0xff:  # 32 bit value
            val, = struct.unpack_from(">L", buf, o)
            o += 4
            return val, o
        if val < 0x80:  # 7 bit value
            return val, o
        val <<= 8
        val |= buf[o]; o += 1
        if val < 0xc000:  # 14 bit value
            return val & 0x3fff, o

        # 29 bit value
        val <<= 8
        val |= buf[o]; o += 1
        val <<= 8
        val |= buf[o]; o += 1
        return val & 0x1fffffff, o

    values = []
    o = 0
    while o < len(buf):
        val, o = nextval(o)
        values.append(val)
    return values
```
Split: train · Source: https://github.com/Carbonara-Project/Guanciale/blob/c239ffac6fb481d09c4071d1de1a09f60dc584ab/guanciale/idblib.py#L161-L194
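A worked example of the variable-width encoding the decoder above expects; the byte values are made up to hit each branch, and the call assumes `idaunpack` from the row above is in scope:

```python
# The first byte selects the width:
#   < 0x80            -> 7-bit value, 1 byte total
#   0x80..0xbf prefix -> 14-bit value, 2 bytes, masked with 0x3fff
#   0xc0..0xfe prefix -> 29-bit value, 4 bytes, masked with 0x1fffffff
#   0xff              -> raw 32-bit big-endian value in the next 4 bytes
buf = bytes([0x05,                           # 7-bit: 5
             0x85, 0x03,                     # 14-bit: 0x8503 & 0x3fff == 1283
             0xff, 0x00, 0x00, 0x01, 0x00])  # 32-bit: 256
print(idaunpack(buf))  # -> [5, 1283, 256]
```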
Row 1,209 · ArabellaTech/django-basic-cms · basic_cms/utils.py · json_to_pages (python)

```python
def json_to_pages(json, user, preferred_lang=None):
    """
    Attempt to create/update pages from JSON string json. user is the user
    that will be used when creating a page if a page's original author can't
    be found. preferred_lang is the language code of the slugs to include in
    error messages (defaults to settings.PAGE_DEFAULT_LANGUAGE).

    Returns (errors, pages_created) where errors is a list of strings and
    pages_created is a list of: (page object, created bool, messages list of
    strings) tuples.

    If any errors are detected, the error list will contain information for
    the user and no pages will be created/updated.
    """
    from .models import Page
    if not preferred_lang:
        preferred_lang = settings.PAGE_DEFAULT_LANGUAGE

    d = simplejson.loads(json)
    try:
        errors = validate_pages_json_data(d, preferred_lang)
    except KeyError as e:
        errors = [_('JSON file is invalid: %s') % (e.args[0],)]

    pages_created = []
    if not errors:
        # pass one
        for p in d['pages']:
            pages_created.append(
                Page.objects.create_and_update_from_json_data(p, user))
        # pass two
        for p, results in zip(d['pages'], pages_created):
            page, created, messages = results
            rtcs = p['redirect_to_complete_slug']
            if rtcs:
                messages.extend(page.update_redirect_to_from_json(rtcs))

        # clean up MPTT links
        #Page.objects.rebuild()

    return errors, pages_created
```
Split: train · Source: https://github.com/ArabellaTech/django-basic-cms/blob/863f3c6098606f663994930cd8e7723ad0c07caf/basic_cms/utils.py#L49-L89
Row 1,210 · litters/shrew · shrew/utils/auth.py · get_password_from_keyring (python)

```python
def get_password_from_keyring(entry=None, username=None):
    """
    :param entry: The entry in the keychain. This is a caller specific key.
    :param username: The username to get the password for. Default is the current user.
    """
    if username is None:
        username = get_username()

    has_keychain = initialize_keychain()

    # Unlock the user's keychain; otherwise, if running under SSH, 'security(1)' will throw an error.
    unlock_keychain(username)

    if has_keychain and entry is not None:
        try:
            return keyring.get_password(entry, username)
        except Exception as e:
            log.warn("Unable to get password from keyring. Continuing..")
            log.debug(e)
    return None
```
Split: train · Source: https://github.com/litters/shrew/blob/ed4b1879321d858d6bc884d14fea7557372a4d41/shrew/utils/auth.py#L226-L247
Row 1,211 · novopl/peltak · src/peltak/extra/gitflow/logic/hotfix.py · finish (python)

```python
def finish():
    # type: () -> None
    """ Merge current hotfix branch into master and develop. """
    pretend = context.get('pretend', False)

    if not pretend and (git.staged() or git.unstaged()):
        log.err(
            "You have uncommitted changes in your repo!\n"
            "You need to stash them before you merge the hotfix branch"
        )
        sys.exit(1)

    develop = conf.get('git.devel_branch', 'develop')
    master = conf.get('git.master_branch', 'master')
    branch = git.current_branch(refresh=True)

    common.assert_branch_type('hotfix')

    # Merge hotfix into master
    common.git_checkout(master)
    common.git_pull(master)
    common.git_merge(master, branch.name)

    # Merge hotfix into develop
    common.git_checkout(develop)
    common.git_pull(develop)
    common.git_merge(develop, branch.name)

    # Cleanup
    common.git_branch_delete(branch.name)
    common.git_prune()

    common.git_checkout(master)
```
Split: train · Source: https://github.com/novopl/peltak/blob/b627acc019e3665875fe76cdca0a14773b69beaa/src/peltak/extra/gitflow/logic/hotfix.py#L76-L108
Row 1,212 · JarryShaw/PyPCAPKit · src/corekit/protochain.py · _AliasList.count (python)

```python
def count(self, value):
    """S.count(value) -> integer -- return number of occurrences of value"""
    from pcapkit.protocols.protocol import Protocol
    try:
        flag = issubclass(value, Protocol)
    except TypeError:
        flag = issubclass(type(value), Protocol)
    if flag or isinstance(value, Protocol):
        value = value.__index__()
    if isinstance(value, tuple):
        value = r'|'.join(value)
    with contextlib.suppress(Exception):
        return sum(1 for data in self.__data__
                   if re.fullmatch(value, data, re.IGNORECASE) is not None)
    return 0
```
Split: train · Source: https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/corekit/protochain.py#L116-L130
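The final counting step reduces to a case-insensitive full-string regex match. A self-contained sketch of that step with made-up data:

```python
import re

data = ['http', 'tcp', 'ip', 'ipv6']
value = r'|'.join(('ip', 'tcp'))  # tuples become a regex alternation
hits = sum(1 for d in data
           if re.fullmatch(value, d, re.IGNORECASE) is not None)
print(hits)  # 2 ('tcp' and 'ip'; 'ipv6' is not a full match)
```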
Row 1,213 · ahwillia/tensortools · tensortools/visualization.py · plot_similarity (python)

```python
def plot_similarity(ensemble, ax=None, jitter=0.1,
                    scatter_kw=dict(), line_kw=dict()):
    """Plots similarity across optimization runs as a function of model rank.

    Parameters
    ----------
    ensemble : Ensemble object
        holds optimization results across a range of model ranks
    ax : matplotlib axis (optional)
        axis to plot on (defaults to current axis object)
    jitter : float (optional)
        amount of horizontal jitter added to scatterpoints (default=0.1)
    scatter_kw : dict (optional)
        keyword arguments for styling the scatterpoints
    line_kw : dict (optional)
        keyword arguments for styling the line

    References
    ----------
    Ulrike von Luxburg (2010). Clustering Stability: An Overview.
    Foundations and Trends in Machine Learning.
    https://arxiv.org/abs/1007.1075
    """
    if ax is None:
        ax = plt.gca()

    # compile statistics for plotting
    x, sim, mean_sim = [], [], []
    for rank in sorted(ensemble.results):
        # reconstruction errors for rank-r models
        s = ensemble.similarities(rank)[1:]
        sim.extend(s)
        x.extend(np.full(len(s), rank))
        mean_sim.append(np.mean(s))

    # add horizontal jitter
    ux = np.unique(x)
    x = np.array(x) + (np.random.rand(len(x)) - 0.5) * jitter

    # make plot
    ax.scatter(x, sim, **scatter_kw)
    ax.plot(ux, mean_sim, **line_kw)
    ax.set_xlabel('model rank')
    ax.set_ylabel('model similarity')
    ax.set_ylim([0, 1.1])

    return ax
```
Split: train · Source: https://github.com/ahwillia/tensortools/blob/f375633ec621caa96665a56205dcf932590d4a6e/tensortools/visualization.py#L64-L113
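A sketch of how this might be called, assuming the Ensemble fit workflow from the tensortools README (`tt.Ensemble`, `fit(data, ranks, replicates)`); those names are not verified here, and the data shape and options are made up:

```python
import numpy as np
import matplotlib.pyplot as plt
import tensortools as tt

# Fit an ensemble of CP decompositions across several ranks.
data = np.random.rand(25, 25, 25)
ensemble = tt.Ensemble(fit_method='cp_als')
ensemble.fit(data, ranks=range(1, 6), replicates=4)

ax = plot_similarity(ensemble, scatter_kw={'alpha': 0.5})
plt.show()
```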
Row 1,214 · project-ncl/pnc-cli · pnc_cli/tools/tasks.py · Tasks.tsort (python)

```python
def tsort(self):
    """Given a partial ordering, return a totally ordered list.

    part is a dict of partial orderings. Each value is a set,
    which the key depends on.

    The return value is a list of sets, each of which has only
    dependencies on items in previous entries in the list.

    raise ValueError if ordering is not possible (check for circular or
    missing dependencies)"""
    task_dict = {}
    for key, task in self.tasks.iteritems():
        task_dict[task] = task.dependencies

    # parts = parts.copy()
    parts = task_dict.copy()
    result = []
    while True:
        level = set([name for name, deps in parts.iteritems() if not deps])
        if not level:
            break
        result.append(level)
        parts = dict([(name, deps - level) for name, deps in parts.iteritems()
                      if name not in level])
    if parts:
        raise ValueError('total ordering not possible (check for circular or missing dependencies)')
    return result
```
Split: train · Source: https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/tools/tasks.py#L197-L223
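The same level-peeling topological sort on a plain dependency dict, rewritten for Python 3 (`items()` instead of `iteritems()`) with made-up task names:

```python
# Each key depends on the names in its set; peel off dependency-free levels.
parts = {'c': {'a', 'b'}, 'b': {'a'}, 'a': set(), 'd': {'c'}}
result = []
while True:
    level = {name for name, deps in parts.items() if not deps}
    if not level:
        break
    result.append(level)
    parts = {name: deps - level for name, deps in parts.items()
             if name not in level}
if parts:
    raise ValueError('total ordering not possible '
                     '(check for circular or missing dependencies)')
print(result)  # [{'a'}, {'b'}, {'c'}, {'d'}]
```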
Row 1,215 · awickert/gFlex · gflex/f1d.py · F1D.calc_max_flexural_wavelength (python)

```python
def calc_max_flexural_wavelength(self):
    """
    Returns the approximate maximum flexural wavelength
    This is important when padding of the grid is required: in Flexure (this
    code), grids are padded out to one maximum flexural wavelength, but in
    any case, the flexural wavelength is a good characteristic distance for
    any truncation limit
    """
    if np.isscalar(self.D):
        Dmax = self.D
    else:
        Dmax = self.D.max()
    # This is an approximation if there is fill that evolves with iterations
    # (e.g., water), but should be good enough that this won't do much to it
    alpha = (4*Dmax/(self.drho*self.g))**.25  # 2D flexural parameter
    self.maxFlexuralWavelength = 2*np.pi*alpha
    self.maxFlexuralWavelength_ncells = int(np.ceil(self.maxFlexuralWavelength / self.dx))
```
Split: train · Source: https://github.com/awickert/gFlex/blob/3ac32249375b0f8d342a142585d86ea4d905a5a0/gflex/f1d.py#L562-L578
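A worked example of the formula alpha = (4*D/(drho*g))**0.25, with illustrative lithosphere numbers (not taken from the code):

```python
import numpy as np

D = 1e23       # flexural rigidity [N m] (illustrative)
drho = 2700.0  # density contrast between mantle and infill [kg m^-3]
g = 9.8        # gravitational acceleration [m s^-2]

alpha = (4 * D / (drho * g)) ** 0.25  # flexural parameter [m]
max_wavelength = 2 * np.pi * alpha
print('alpha = %.1f km' % (alpha / 1e3))                     # ~62 km
print('max flexural wavelength = %.1f km' % (max_wavelength / 1e3))  # ~392 km
```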
Row 1,216 · tanghaibao/jcvi · jcvi/annotation/qc.py · uniq (python)

```python
def uniq(args):
    """
    %prog uniq gffile cdsfasta

    Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
    'piles' are processed, one by one.

    Here, we use a different algorithm that retains the best non-overlapping
    subset within each pile, rather than the single best model. Scoring
    function is also different: rather than based on score or span, we
    optimize for the subset that shows the best combined score. Score is
    defined by:

    score = (1 - AED) * length
    """
    p = OptionParser(uniq.__doc__)
    p.set_outfile()
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    gffile, cdsfasta = args
    gff = Gff(gffile)
    sizes = Sizes(cdsfasta).mapping
    gene_register = {}
    for g in gff:
        if g.type != "mRNA":
            continue
        aed = float(g.attributes["_AED"][0])
        gene_register[g.parent] = (1 - aed) * sizes[g.accn]

    allgenes = import_feats(gffile)
    g = get_piles(allgenes)

    bestids = set()
    for group in g:
        ranges = [to_range(x, score=gene_register[x.accn], id=x.accn)
                  for x in group]
        selected_chain, score = range_chain(ranges)
        bestids |= set(x.id for x in selected_chain)

    removed = set(x.accn for x in allgenes) - bestids
    fw = open("removed.ids", "w")
    print("\n".join(sorted(removed)), file=fw)
    fw.close()
    populate_children(opts.outfile, bestids, gffile, "gene")
```
Split: train · Source: https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/qc.py#L34-L80
Row 1,217 · phaethon/kamene · kamene/contrib/gsm_um.py · deactivatePdpContextAccept (python)

```python
def deactivatePdpContextAccept():
    """DEACTIVATE PDP CONTEXT ACCEPT Section 9.5.9"""
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x47)  # 01000111
    packet = a / b
    return packet
```
Split: train · Source: https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L2726-L2731
Row 1,218 · sentinel-hub/eo-learn · core/eolearn/core/core_tasks.py · RenameFeature.execute (python)

```python
def execute(self, eopatch):
    """Returns the EOPatch with renamed features.

    :param eopatch: input EOPatch
    :type eopatch: EOPatch
    :return: input EOPatch with the renamed features
    :rtype: EOPatch
    """
    for feature_type, feature_name, new_feature_name in self.feature_gen(eopatch):
        eopatch[feature_type][new_feature_name] = eopatch[feature_type][feature_name]
        del eopatch[feature_type][feature_name]

    return eopatch
```
Split: train · Source: https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/core/eolearn/core/core_tasks.py#L162-L174
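The rename is a move-then-delete on the patch's per-type mapping. The same pattern on a plain dict, with made-up feature names:

```python
# Copy the value under the new key, then drop the old key.
eopatch_like = {'data': {'BANDS': [0.1, 0.2]}}
eopatch_like['data']['REFLECTANCE'] = eopatch_like['data'].pop('BANDS')
print(eopatch_like)  # {'data': {'REFLECTANCE': [0.1, 0.2]}}
```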
Row 1,219 · yamcs/yamcs-python · yamcs-client/yamcs/client.py · YamcsClient.list_clients (python)

```python
def list_clients(self, instance=None):
    """
    Lists the clients.

    :param Optional[str] instance: A Yamcs instance name.
    :rtype: ~collections.Iterable[yamcs.model.Client]
    """
    # Server does not do pagination on listings of this resource.
    # Return an iterator anyway for similarity with other API methods
    url = '/clients'
    if instance:
        url = '/instances/{}/clients'.format(instance)
    response = self.get_proto(path=url)
    message = rest_pb2.ListClientsResponse()
    message.ParseFromString(response.content)
    clients = getattr(message, 'client')
    return iter([Client(client) for client in clients])
```
def list_clients(self, instance=None): """ Lists the clients. :param Optional[str] instance: A Yamcs instance name. :rtype: ~collections.Iterable[yamcs.model.Client] """ # Server does not do pagination on listings of this resource. # Return an iterator anyway for similarity with other API methods url = '/clients' if instance: url = '/instances/{}/clients'.format(instance) response = self.get_proto(path=url) message = rest_pb2.ListClientsResponse() message.ParseFromString(response.content) clients = getattr(message, 'client') return iter([Client(client) for client in clients])
['def', 'list_clients', '(', 'self', ',', 'instance', '=', 'None', ')', ':', '# Server does not do pagination on listings of this resource.', '# Return an iterator anyway for similarity with other API methods', 'url', '=', "'/clients'", 'if', 'instance', ':', 'url', '=', "'/instances/{}/clients'", '.', 'format', '(', 'instance', ')', 'response', '=', 'self', '.', 'get_proto', '(', 'path', '=', 'url', ')', 'message', '=', 'rest_pb2', '.', 'ListClientsResponse', '(', ')', 'message', '.', 'ParseFromString', '(', 'response', '.', 'content', ')', 'clients', '=', 'getattr', '(', 'message', ',', "'client'", ')', 'return', 'iter', '(', '[', 'Client', '(', 'client', ')', 'for', 'client', 'in', 'clients', ']', ')']
Lists the clients.

:param Optional[str] instance: A Yamcs instance name.
:rtype: ~collections.Iterable[yamcs.model.Client]
['Lists', 'the', 'clients', '.']
train
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/client.py#L312-L328
1,220
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/targets.py
TargetRegistry.create_typed_target
def create_typed_target (self, type, project, name, sources,
                         requirements, default_build, usage_requirements):
    """ Creates a TypedTarget with the specified properties.
        The 'name', 'sources', 'requirements', 'default_build' and
        'usage_requirements' are assumed to be in the form specified
        by the user in Jamfile corresponding to 'project'.
    """
    assert isinstance(type, basestring)
    assert isinstance(project, ProjectTarget)
    assert is_iterable_typed(sources, basestring)
    assert is_iterable_typed(requirements, basestring)
    assert is_iterable_typed(default_build, basestring)
    return self.main_target_alternative (TypedTarget (name, project, type,
        self.main_target_sources (sources, name),
        self.main_target_requirements (requirements, project),
        self.main_target_default_build (default_build, project),
        self.main_target_usage_requirements (usage_requirements, project)))
python
def create_typed_target (self, type, project, name, sources,
                         requirements, default_build, usage_requirements):
    """ Creates a TypedTarget with the specified properties.
        The 'name', 'sources', 'requirements', 'default_build' and
        'usage_requirements' are assumed to be in the form specified
        by the user in Jamfile corresponding to 'project'.
    """
    assert isinstance(type, basestring)
    assert isinstance(project, ProjectTarget)
    assert is_iterable_typed(sources, basestring)
    assert is_iterable_typed(requirements, basestring)
    assert is_iterable_typed(default_build, basestring)
    return self.main_target_alternative (TypedTarget (name, project, type,
        self.main_target_sources (sources, name),
        self.main_target_requirements (requirements, project),
        self.main_target_default_build (default_build, project),
        self.main_target_usage_requirements (usage_requirements, project)))
['def', 'create_typed_target', '(', 'self', ',', 'type', ',', 'project', ',', 'name', ',', 'sources', ',', 'requirements', ',', 'default_build', ',', 'usage_requirements', ')', ':', 'assert', 'isinstance', '(', 'type', ',', 'basestring', ')', 'assert', 'isinstance', '(', 'project', ',', 'ProjectTarget', ')', 'assert', 'is_iterable_typed', '(', 'sources', ',', 'basestring', ')', 'assert', 'is_iterable_typed', '(', 'requirements', ',', 'basestring', ')', 'assert', 'is_iterable_typed', '(', 'default_build', ',', 'basestring', ')', 'return', 'self', '.', 'main_target_alternative', '(', 'TypedTarget', '(', 'name', ',', 'project', ',', 'type', ',', 'self', '.', 'main_target_sources', '(', 'sources', ',', 'name', ')', ',', 'self', '.', 'main_target_requirements', '(', 'requirements', ',', 'project', ')', ',', 'self', '.', 'main_target_default_build', '(', 'default_build', ',', 'project', ')', ',', 'self', '.', 'main_target_usage_requirements', '(', 'usage_requirements', ',', 'project', ')', ')', ')']
Creates a TypedTarget with the specified properties. The 'name', 'sources', 'requirements', 'default_build' and 'usage_requirements' are assumed to be in the form specified by the user in Jamfile corresponding to 'project'.
['Creates', 'a', 'TypedTarget', 'with', 'the', 'specified', 'properties', '.', 'The', 'name', 'sources', 'requirements', 'default_build', 'and', 'usage_requirements', 'are', 'assumed', 'to', 'be', 'in', 'the', 'form', 'specified', 'by', 'the', 'user', 'in', 'Jamfile', 'corresponding', 'to', 'project', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/targets.py#L222-L237
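The guards lean on an is_iterable_typed helper. A runnable sketch of what such a check plausibly does (an assumption, not the b2 source; str stands in for Python 2's basestring):

def is_iterable_typed(values, value_type):
    # True if `values` is a list/tuple whose elements are all of `value_type`.
    return isinstance(values, (list, tuple)) and all(
        isinstance(v, value_type) for v in values)

assert is_iterable_typed(["a", "b"], str)
assert not is_iterable_typed("ab", str)  # a plain string does not count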
1,221
SiLab-Bonn/pyBAR
pybar/fei4/register.py
load_configuration_from_text_file
def load_configuration_from_text_file(register, configuration_file):
    '''Loading configuration from text files to register object

    Parameters
    ----------
    register : pybar.fei4.register object
    configuration_file : string
        Full path (directory and filename) of the configuration file. If name is not given, reload configuration from file.
    '''
    logging.info("Loading configuration: %s" % configuration_file)
    register.configuration_file = configuration_file
    config_dict = parse_global_config(register.configuration_file)
    if 'Flavor' in config_dict:
        flavor = config_dict.pop('Flavor').lower()
        if register.flavor:
            pass
        else:
            register.init_fe_type(flavor)
    else:
        if register.flavor:
            pass
        else:
            raise ValueError('Flavor not specified')
    if 'Chip_ID' in config_dict:
        chip_id = config_dict.pop('Chip_ID')
        if register.chip_address:
            pass
        else:
            register.set_chip_address(chip_address=chip_id & 0x7, broadcast=True if chip_id & 0x8 else False)
    elif 'Chip_Address' in config_dict:
        chip_address = config_dict.pop('Chip_Address')
        if register.chip_address:
            pass
        else:
            register.set_chip_address(chip_address)
    else:
        if register.chip_id_initialized:
            pass
        else:
            raise ValueError('Chip address not specified')
    global_registers_configured = []
    pixel_registers_configured = []
    for key in config_dict.keys():
        value = config_dict.pop(key)
        if key in register.global_registers:
            register.set_global_register_value(key, value)
            global_registers_configured.append(key)
        elif key in register.pixel_registers:
            register.set_pixel_register_value(key, value)
            pixel_registers_configured.append(key)
        elif key in register.calibration_parameters:
            register.calibration_parameters[key] = value
        else:
            register.miscellaneous[key] = value
    global_registers = register.get_global_register_attributes('name', readonly=False)
    pixel_registers = register.pixel_registers.keys()
    global_registers_not_configured = set(global_registers).difference(global_registers_configured)
    pixel_registers_not_configured = set(pixel_registers).difference(pixel_registers_configured)
    if global_registers_not_configured:
        logging.warning("Following global register(s) not configured: {}".format(', '.join('\'' + reg + '\'' for reg in global_registers_not_configured)))
    if pixel_registers_not_configured:
        logging.warning("Following pixel register(s) not configured: {}".format(', '.join('\'' + reg + '\'' for reg in pixel_registers_not_configured)))
    if register.miscellaneous:
        logging.warning("Found following unknown parameter(s): {}".format(', '.join('\'' + parameter + '\'' for parameter in register.miscellaneous.iterkeys())))
python
def load_configuration_from_text_file(register, configuration_file):
    '''Loading configuration from text files to register object

    Parameters
    ----------
    register : pybar.fei4.register object
    configuration_file : string
        Full path (directory and filename) of the configuration file. If name is not given, reload configuration from file.
    '''
    logging.info("Loading configuration: %s" % configuration_file)
    register.configuration_file = configuration_file
    config_dict = parse_global_config(register.configuration_file)
    if 'Flavor' in config_dict:
        flavor = config_dict.pop('Flavor').lower()
        if register.flavor:
            pass
        else:
            register.init_fe_type(flavor)
    else:
        if register.flavor:
            pass
        else:
            raise ValueError('Flavor not specified')
    if 'Chip_ID' in config_dict:
        chip_id = config_dict.pop('Chip_ID')
        if register.chip_address:
            pass
        else:
            register.set_chip_address(chip_address=chip_id & 0x7, broadcast=True if chip_id & 0x8 else False)
    elif 'Chip_Address' in config_dict:
        chip_address = config_dict.pop('Chip_Address')
        if register.chip_address:
            pass
        else:
            register.set_chip_address(chip_address)
    else:
        if register.chip_id_initialized:
            pass
        else:
            raise ValueError('Chip address not specified')
    global_registers_configured = []
    pixel_registers_configured = []
    for key in config_dict.keys():
        value = config_dict.pop(key)
        if key in register.global_registers:
            register.set_global_register_value(key, value)
            global_registers_configured.append(key)
        elif key in register.pixel_registers:
            register.set_pixel_register_value(key, value)
            pixel_registers_configured.append(key)
        elif key in register.calibration_parameters:
            register.calibration_parameters[key] = value
        else:
            register.miscellaneous[key] = value
    global_registers = register.get_global_register_attributes('name', readonly=False)
    pixel_registers = register.pixel_registers.keys()
    global_registers_not_configured = set(global_registers).difference(global_registers_configured)
    pixel_registers_not_configured = set(pixel_registers).difference(pixel_registers_configured)
    if global_registers_not_configured:
        logging.warning("Following global register(s) not configured: {}".format(', '.join('\'' + reg + '\'' for reg in global_registers_not_configured)))
    if pixel_registers_not_configured:
        logging.warning("Following pixel register(s) not configured: {}".format(', '.join('\'' + reg + '\'' for reg in pixel_registers_not_configured)))
    if register.miscellaneous:
        logging.warning("Found following unknown parameter(s): {}".format(', '.join('\'' + parameter + '\'' for parameter in register.miscellaneous.iterkeys())))
['def', 'load_configuration_from_text_file', '(', 'register', ',', 'configuration_file', ')', ':', 'logging', '.', 'info', '(', '"Loading configuration: %s"', '%', 'configuration_file', ')', 'register', '.', 'configuration_file', '=', 'configuration_file', 'config_dict', '=', 'parse_global_config', '(', 'register', '.', 'configuration_file', ')', 'if', "'Flavor'", 'in', 'config_dict', ':', 'flavor', '=', 'config_dict', '.', 'pop', '(', "'Flavor'", ')', '.', 'lower', '(', ')', 'if', 'register', '.', 'flavor', ':', 'pass', 'else', ':', 'register', '.', 'init_fe_type', '(', 'flavor', ')', 'else', ':', 'if', 'register', '.', 'flavor', ':', 'pass', 'else', ':', 'raise', 'ValueError', '(', "'Flavor not specified'", ')', 'if', "'Chip_ID'", 'in', 'config_dict', ':', 'chip_id', '=', 'config_dict', '.', 'pop', '(', "'Chip_ID'", ')', 'if', 'register', '.', 'chip_address', ':', 'pass', 'else', ':', 'register', '.', 'set_chip_address', '(', 'chip_address', '=', 'chip_id', '&', '0x7', ',', 'broadcast', '=', 'True', 'if', 'chip_id', '&', '0x8', 'else', 'False', ')', 'elif', "'Chip_Address'", 'in', 'config_dict', ':', 'chip_address', '=', 'config_dict', '.', 'pop', '(', "'Chip_Address'", ')', 'if', 'register', '.', 'chip_address', ':', 'pass', 'else', ':', 'register', '.', 'set_chip_address', '(', 'chip_address', ')', 'else', ':', 'if', 'register', '.', 'chip_id_initialized', ':', 'pass', 'else', ':', 'raise', 'ValueError', '(', "'Chip address not specified'", ')', 'global_registers_configured', '=', '[', ']', 'pixel_registers_configured', '=', '[', ']', 'for', 'key', 'in', 'config_dict', '.', 'keys', '(', ')', ':', 'value', '=', 'config_dict', '.', 'pop', '(', 'key', ')', 'if', 'key', 'in', 'register', '.', 'global_registers', ':', 'register', '.', 'set_global_register_value', '(', 'key', ',', 'value', ')', 'global_registers_configured', '.', 'append', '(', 'key', ')', 'elif', 'key', 'in', 'register', '.', 'pixel_registers', ':', 'register', '.', 'set_pixel_register_value', '(', 'key', ',', 'value', ')', 'pixel_registers_configured', '.', 'append', '(', 'key', ')', 'elif', 'key', 'in', 'register', '.', 'calibration_parameters', ':', 'register', '.', 'calibration_parameters', '[', 'key', ']', '=', 'value', 'else', ':', 'register', '.', 'miscellaneous', '[', 'key', ']', '=', 'value', 'global_registers', '=', 'register', '.', 'get_global_register_attributes', '(', "'name'", ',', 'readonly', '=', 'False', ')', 'pixel_registers', '=', 'register', '.', 'pixel_registers', '.', 'keys', '(', ')', 'global_registers_not_configured', '=', 'set', '(', 'global_registers', ')', '.', 'difference', '(', 'global_registers_configured', ')', 'pixel_registers_not_configured', '=', 'set', '(', 'pixel_registers', ')', '.', 'difference', '(', 'pixel_registers_configured', ')', 'if', 'global_registers_not_configured', ':', 'logging', '.', 'warning', '(', '"Following global register(s) not configured: {}"', '.', 'format', '(', "', '", '.', 'join', '(', "'\\''", '+', 'reg', '+', "'\\''", 'for', 'reg', 'in', 'global_registers_not_configured', ')', ')', ')', 'if', 'pixel_registers_not_configured', ':', 'logging', '.', 'warning', '(', '"Following pixel register(s) not configured: {}"', '.', 'format', '(', "', '", '.', 'join', '(', "'\\''", '+', 'reg', '+', "'\\''", 'for', 'reg', 'in', 'pixel_registers_not_configured', ')', ')', ')', 'if', 'register', '.', 'miscellaneous', ':', 'logging', '.', 'warning', '(', '"Found following unknown parameter(s): {}"', '.', 'format', '(', "', '", '.', 'join', '(', "'\\''", '+', 'parameter', '+', "'\\''", 'for', 'parameter', 'in', 'register', '.', 'miscellaneous', '.', 'iterkeys', '(', ')', ')', ')', ')']
Loading configuration from text files to register object

Parameters
----------
register : pybar.fei4.register object
configuration_file : string
    Full path (directory and filename) of the configuration file. If name is not given, reload configuration from file.
['Loading', 'configuration', 'from', 'text', 'files', 'to', 'register', 'object', 'Parameters', '----------', 'register', ':', 'pybar', '.', 'fei4', '.', 'register', 'object', 'configuration_file', ':', 'string', 'Full', 'path', '(', 'directory', 'and', 'filename', ')', 'of', 'the', 'configuration', 'file', '.', 'If', 'name', 'is', 'not', 'given', 'reload', 'configuration', 'from', 'file', '.']
train
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/fei4/register.py#L715-L781
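At its core the function pops every parsed key into one of several buckets and warns about leftovers. A runnable, stripped-down sketch of that routing pattern (names are illustrative, not the pyBAR API):

import logging

def route_config(config_dict, known_global, known_pixel):
    # Pop every key into the matching bucket; whatever remains is
    # collected as "miscellaneous" and reported at the end.
    global_values, pixel_values, misc = {}, {}, {}
    for key in list(config_dict):
        value = config_dict.pop(key)
        if key in known_global:
            global_values[key] = value
        elif key in known_pixel:
            pixel_values[key] = value
        else:
            misc[key] = value
    for name in set(known_global) - set(global_values):
        logging.warning("global register %r not configured", name)
    return global_values, pixel_values, misc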
1,222
mikeywaites/flask-arrested
arrested/contrib/kim_arrested.py
KimRequestHandler.handle_error
def handle_error(self, exp):
    """Called if a Mapper returns MappingInvalid. Should handle the error
    and return it in the appropriate format, can be overridden in order
    to change the error format.

    :param exp: MappingInvalid exception raised
    """
    payload = {
        "message": "Invalid or incomplete data provided.",
        "errors": exp.errors
    }
    self.endpoint.return_error(self.error_status, payload=payload)
python
def handle_error(self, exp):
    """Called if a Mapper returns MappingInvalid. Should handle the error
    and return it in the appropriate format, can be overridden in order
    to change the error format.

    :param exp: MappingInvalid exception raised
    """
    payload = {
        "message": "Invalid or incomplete data provided.",
        "errors": exp.errors
    }
    self.endpoint.return_error(self.error_status, payload=payload)
['def', 'handle_error', '(', 'self', ',', 'exp', ')', ':', 'payload', '=', '{', '"message"', ':', '"Invalid or incomplete data provided."', ',', '"errors"', ':', 'exp', '.', 'errors', '}', 'self', '.', 'endpoint', '.', 'return_error', '(', 'self', '.', 'error_status', ',', 'payload', '=', 'payload', ')']
Called if a Mapper returns MappingInvalid. Should handle the error
and return it in the appropriate format, can be overridden in order
to change the error format.

:param exp: MappingInvalid exception raised
['Called', 'if', 'a', 'Mapper', 'returns', 'MappingInvalid', '.', 'Should', 'handle', 'the', 'error', 'and', 'return', 'it', 'in', 'the', 'appropriate', 'format', 'can', 'be', 'overridden', 'in', 'order', 'to', 'change', 'the', 'error', 'format', '.']
train
https://github.com/mikeywaites/flask-arrested/blob/6b97ce2ad2765f9acab10f4726e310258aa51de0/arrested/contrib/kim_arrested.py#L126-L137
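The pattern is a straight translation from a validation exception to an error payload. A self-contained sketch (the MappingInvalid stub below is illustrative, not the kim library's class):

class MappingInvalid(Exception):
    def __init__(self, errors):
        self.errors = errors

def to_error_payload(exc):
    # Same shape as above: a human-readable message plus field-level errors.
    return {"message": "Invalid or incomplete data provided.",
            "errors": exc.errors}

try:
    raise MappingInvalid({"name": "This field is required."})
except MappingInvalid as exc:
    assert to_error_payload(exc)["errors"]["name"]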
1,223
Yubico/python-pyhsm
pyhsm/ksm/yubikey_ksm.py
YHSM_KSMRequestHandler.my_address_string
def my_address_string(self):
    """ For logging client host without resolving. """
    addr = getattr(self, 'client_address', ('', None))[0]
    # If listed in proxy_ips, use the X-Forwarded-For header, if present.
    if addr in self.proxy_ips:
        return self.headers.getheader('x-forwarded-for', addr)
    return addr
python
def my_address_string(self):
    """ For logging client host without resolving. """
    addr = getattr(self, 'client_address', ('', None))[0]
    # If listed in proxy_ips, use the X-Forwarded-For header, if present.
    if addr in self.proxy_ips:
        return self.headers.getheader('x-forwarded-for', addr)
    return addr
['def', 'my_address_string', '(', 'self', ')', ':', 'addr', '=', 'getattr', '(', 'self', ',', "'client_address'", ',', '(', "''", ',', 'None', ')', ')', '[', '0', ']', '# If listed in proxy_ips, use the X-Forwarded-For header, if present.', 'if', 'addr', 'in', 'self', '.', 'proxy_ips', ':', 'return', 'self', '.', 'headers', '.', 'getheader', '(', "'x-forwarded-for'", ',', 'addr', ')', 'return', 'addr']
For logging client host without resolving.
['For', 'logging', 'client', 'host', 'without', 'resolving', '.']
train
https://github.com/Yubico/python-pyhsm/blob/b6e2744d1ea15c352a0fc1d6ebc5950026b71311/pyhsm/ksm/yubikey_ksm.py#L174-L181
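The key point is trusting X-Forwarded-For only when the direct peer is a known proxy, since otherwise a client could spoof its logged address. A runnable sketch of that decision, independent of the HTTP server plumbing:

def address_for_logging(client_ip, headers, proxy_ips):
    # Only a known proxy may substitute the forwarded address.
    if client_ip in proxy_ips:
        return headers.get('x-forwarded-for', client_ip)
    return client_ip

assert address_for_logging('10.0.0.1', {'x-forwarded-for': '1.2.3.4'},
                           {'10.0.0.1'}) == '1.2.3.4'
assert address_for_logging('8.8.8.8', {'x-forwarded-for': '1.2.3.4'},
                           {'10.0.0.1'}) == '8.8.8.8'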
1,224
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_process_net_command_json.py
_PyDevJsonCommandProcessor.on_next_request
def on_next_request(self, py_db, request):
    '''
    :param NextRequest request:
    '''
    arguments = request.arguments  # : :type arguments: NextArguments
    thread_id = arguments.threadId

    if py_db.get_use_libraries_filter():
        step_cmd_id = CMD_STEP_OVER_MY_CODE
    else:
        step_cmd_id = CMD_STEP_OVER

    self.api.request_step(py_db, thread_id, step_cmd_id)

    response = pydevd_base_schema.build_response(request)
    return NetCommand(CMD_RETURN, 0, response, is_json=True)
python
def on_next_request(self, py_db, request):
    '''
    :param NextRequest request:
    '''
    arguments = request.arguments  # : :type arguments: NextArguments
    thread_id = arguments.threadId

    if py_db.get_use_libraries_filter():
        step_cmd_id = CMD_STEP_OVER_MY_CODE
    else:
        step_cmd_id = CMD_STEP_OVER

    self.api.request_step(py_db, thread_id, step_cmd_id)

    response = pydevd_base_schema.build_response(request)
    return NetCommand(CMD_RETURN, 0, response, is_json=True)
['def', 'on_next_request', '(', 'self', ',', 'py_db', ',', 'request', ')', ':', 'arguments', '=', 'request', '.', 'arguments', '# : :type arguments: NextArguments', 'thread_id', '=', 'arguments', '.', 'threadId', 'if', 'py_db', '.', 'get_use_libraries_filter', '(', ')', ':', 'step_cmd_id', '=', 'CMD_STEP_OVER_MY_CODE', 'else', ':', 'step_cmd_id', '=', 'CMD_STEP_OVER', 'self', '.', 'api', '.', 'request_step', '(', 'py_db', ',', 'thread_id', ',', 'step_cmd_id', ')', 'response', '=', 'pydevd_base_schema', '.', 'build_response', '(', 'request', ')', 'return', 'NetCommand', '(', 'CMD_RETURN', ',', '0', ',', 'response', ',', 'is_json', '=', 'True', ')']
:param NextRequest request:
[':', 'param', 'NextRequest', 'request', ':']
train
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_process_net_command_json.py#L333-L348
1,225
mozilla-releng/scriptworker
scriptworker/ed25519.py
verify_ed25519_signature
def verify_ed25519_signature(public_key, contents, signature, message):
    """Verify that ``signature`` comes from ``public_key`` and ``contents``.

    Args:
        public_key (Ed25519PublicKey): the key to verify the signature
        contents (bytes): the contents that was signed
        signature (bytes): the signature to verify
        message (str): the error message to raise.

    Raises:
        ScriptWorkerEd25519Error: on failure

    """
    try:
        public_key.verify(signature, contents)
    except InvalidSignature as exc:
        raise ScriptWorkerEd25519Error(message % {'exc': str(exc)})
python
def verify_ed25519_signature(public_key, contents, signature, message):
    """Verify that ``signature`` comes from ``public_key`` and ``contents``.

    Args:
        public_key (Ed25519PublicKey): the key to verify the signature
        contents (bytes): the contents that was signed
        signature (bytes): the signature to verify
        message (str): the error message to raise.

    Raises:
        ScriptWorkerEd25519Error: on failure

    """
    try:
        public_key.verify(signature, contents)
    except InvalidSignature as exc:
        raise ScriptWorkerEd25519Error(message % {'exc': str(exc)})
['def', 'verify_ed25519_signature', '(', 'public_key', ',', 'contents', ',', 'signature', ',', 'message', ')', ':', 'try', ':', 'public_key', '.', 'verify', '(', 'signature', ',', 'contents', ')', 'except', 'InvalidSignature', 'as', 'exc', ':', 'raise', 'ScriptWorkerEd25519Error', '(', 'message', '%', '{', "'exc'", ':', 'str', '(', 'exc', ')', '}', ')']
Verify that ``signature`` comes from ``public_key`` and ``contents``.

Args:
    public_key (Ed25519PublicKey): the key to verify the signature
    contents (bytes): the contents that was signed
    signature (bytes): the signature to verify
    message (str): the error message to raise.

Raises:
    ScriptWorkerEd25519Error: on failure
['Verify', 'that', 'signature', 'comes', 'from', 'public_key', 'and', 'contents', '.']
train
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/ed25519.py#L119-L135
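A runnable round-trip with the cryptography package, whose Ed25519 API this helper wraps; key generation below is only for demonstration:

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey

private_key = Ed25519PrivateKey.generate()
public_key = private_key.public_key()
contents = b"task payload"
signature = private_key.sign(contents)

# Valid signature: verify() returns None without raising.
public_key.verify(signature, contents)

# Tampered contents: verify() raises InvalidSignature, which the wrapper
# above re-raises as ScriptWorkerEd25519Error.
try:
    public_key.verify(signature, b"tampered payload")
except InvalidSignature:
    pass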
1,226
rohankapoorcom/zm-py
zoneminder/monitor.py
Monitor.is_available
def is_available(self) -> bool:
    """Indicate if this Monitor is currently available."""
    status_response = self._client.get_state(
        'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
            self._monitor_id
        )
    )

    if not status_response:
        _LOGGER.warning('Could not get availability for monitor {}'.format(
            self._monitor_id
        ))
        return False

    # Monitor_Status was only added in ZM 1.32.3
    monitor_status = self._raw_result.get('Monitor_Status', None)
    capture_fps = monitor_status and monitor_status['CaptureFPS']

    return status_response.get('status', False) and capture_fps != "0.00"
python
def is_available(self) -> bool:
    """Indicate if this Monitor is currently available."""
    status_response = self._client.get_state(
        'api/monitors/daemonStatus/id:{}/daemon:zmc.json'.format(
            self._monitor_id
        )
    )

    if not status_response:
        _LOGGER.warning('Could not get availability for monitor {}'.format(
            self._monitor_id
        ))
        return False

    # Monitor_Status was only added in ZM 1.32.3
    monitor_status = self._raw_result.get('Monitor_Status', None)
    capture_fps = monitor_status and monitor_status['CaptureFPS']

    return status_response.get('status', False) and capture_fps != "0.00"
['def', 'is_available', '(', 'self', ')', '->', 'bool', ':', 'status_response', '=', 'self', '.', '_client', '.', 'get_state', '(', "'api/monitors/daemonStatus/id:{}/daemon:zmc.json'", '.', 'format', '(', 'self', '.', '_monitor_id', ')', ')', 'if', 'not', 'status_response', ':', '_LOGGER', '.', 'warning', '(', "'Could not get availability for monitor {}'", '.', 'format', '(', 'self', '.', '_monitor_id', ')', ')', 'return', 'False', '# Monitor_Status was only added in ZM 1.32.3', 'monitor_status', '=', 'self', '.', '_raw_result', '.', 'get', '(', "'Monitor_Status'", ',', 'None', ')', 'capture_fps', '=', 'monitor_status', 'and', 'monitor_status', '[', "'CaptureFPS'", ']', 'return', 'status_response', '.', 'get', '(', "'status'", ',', 'False', ')', 'and', 'capture_fps', '!=', '"0.00"']
Indicate if this Monitor is currently available.
['Indicate', 'if', 'this', 'Monitor', 'is', 'currently', 'available', '.']
train
https://github.com/rohankapoorcom/zm-py/blob/bd3a9f6b2f7b84b37589e2939f628b479a5531bf/zoneminder/monitor.py#L143-L161
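One subtlety: when 'Monitor_Status' is absent (ZoneMinder before 1.32.3), capture_fps is None, and None != "0.00" is True, so the daemon status alone decides. A dict-based sketch showing that behaviour:

def monitor_available(status_response, raw_result):
    # Mirror of the logic above with plain dicts standing in for the API.
    monitor_status = raw_result.get('Monitor_Status', None)
    capture_fps = monitor_status and monitor_status['CaptureFPS']
    return bool(status_response.get('status', False) and capture_fps != "0.00")

assert monitor_available({'status': True}, {'Monitor_Status': {'CaptureFPS': '5.0'}})
assert not monitor_available({'status': True}, {'Monitor_Status': {'CaptureFPS': '0.00'}})
assert monitor_available({'status': True}, {})  # no Monitor_Status key at all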
1,227
openstack/proliantutils
proliantutils/ilo/ris.py
RISOperations._get_persistent_boot_devices
def _get_persistent_boot_devices(self):
    """Get details of persistent boot devices, its order

    :returns: List of dictionary of boot sources and
              list of boot device order
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the command is
             not supported on the server.
    """
    # Check if the BIOS resource exists.
    headers_bios, bios_uri, bios_settings = self._check_bios_resource()

    # Get the Boot resource.
    boot_settings = self._get_bios_boot_resource(bios_settings)

    # Get the BootSources resource
    try:
        boot_sources = boot_settings['BootSources']
    except KeyError:
        msg = ("BootSources resource not found.")
        raise exception.IloError(msg)

    try:
        boot_order = boot_settings['PersistentBootConfigOrder']
    except KeyError:
        msg = ("PersistentBootConfigOrder resource not found.")
        raise exception.IloCommandNotSupportedError(msg)

    return boot_sources, boot_order
python
def _get_persistent_boot_devices(self):
    """Get details of persistent boot devices, its order

    :returns: List of dictionary of boot sources and
              list of boot device order
    :raises: IloError, on an error from iLO.
    :raises: IloCommandNotSupportedError, if the command is
             not supported on the server.
    """
    # Check if the BIOS resource exists.
    headers_bios, bios_uri, bios_settings = self._check_bios_resource()

    # Get the Boot resource.
    boot_settings = self._get_bios_boot_resource(bios_settings)

    # Get the BootSources resource
    try:
        boot_sources = boot_settings['BootSources']
    except KeyError:
        msg = ("BootSources resource not found.")
        raise exception.IloError(msg)

    try:
        boot_order = boot_settings['PersistentBootConfigOrder']
    except KeyError:
        msg = ("PersistentBootConfigOrder resource not found.")
        raise exception.IloCommandNotSupportedError(msg)

    return boot_sources, boot_order
['def', '_get_persistent_boot_devices', '(', 'self', ')', ':', '# Check if the BIOS resource if exists.', 'headers_bios', ',', 'bios_uri', ',', 'bios_settings', '=', 'self', '.', '_check_bios_resource', '(', ')', '# Get the Boot resource.', 'boot_settings', '=', 'self', '.', '_get_bios_boot_resource', '(', 'bios_settings', ')', '# Get the BootSources resource', 'try', ':', 'boot_sources', '=', 'boot_settings', '[', "'BootSources'", ']', 'except', 'KeyError', ':', 'msg', '=', '(', '"BootSources resource not found."', ')', 'raise', 'exception', '.', 'IloError', '(', 'msg', ')', 'try', ':', 'boot_order', '=', 'boot_settings', '[', "'PersistentBootConfigOrder'", ']', 'except', 'KeyError', ':', 'msg', '=', '(', '"PersistentBootConfigOrder resource not found."', ')', 'raise', 'exception', '.', 'IloCommandNotSupportedError', '(', 'msg', ')', 'return', 'boot_sources', ',', 'boot_order']
Get details of persistent boot devices, its order

:returns: List of dictionary of boot sources and
          list of boot device order
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is
         not supported on the server.
['Get', 'details', 'of', 'persistent', 'boot', 'devices', 'its', 'order']
train
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1497-L1525
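The two try/except blocks share one shape: translate a missing key into a domain-specific exception. A generic runnable sketch of that pattern (the IloError stub below is illustrative):

class IloError(Exception):
    pass

def get_required(settings, key, error_cls=IloError):
    # Turn a bare KeyError into the library's own exception type,
    # mirroring the two lookups above.
    try:
        return settings[key]
    except KeyError:
        raise error_cls("{} resource not found.".format(key))

boot = {"BootSources": ["HDD", "PXE"]}
assert get_required(boot, "BootSources") == ["HDD", "PXE"]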
1,228
kibitzr/kibitzr
kibitzr/fetcher/factory.py
fetcher_factory
def fetcher_factory(conf):
    """Return initialized fetcher capable of processing given conf."""
    global PROMOTERS
    applicable = []
    if not PROMOTERS:
        PROMOTERS = load_promoters()
    for promoter in PROMOTERS:
        if promoter.is_applicable(conf):
            applicable.append((promoter.PRIORITY, promoter))
    if applicable:
        best_match = sorted(applicable, reverse=True)[0][1]
        return best_match(conf)
    else:
        raise ConfigurationError(
            'No fetcher is applicable for "{0}"'.format(conf['name'])
        )
python
def fetcher_factory(conf):
    """Return initialized fetcher capable of processing given conf."""
    global PROMOTERS
    applicable = []
    if not PROMOTERS:
        PROMOTERS = load_promoters()
    for promoter in PROMOTERS:
        if promoter.is_applicable(conf):
            applicable.append((promoter.PRIORITY, promoter))
    if applicable:
        best_match = sorted(applicable, reverse=True)[0][1]
        return best_match(conf)
    else:
        raise ConfigurationError(
            'No fetcher is applicable for "{0}"'.format(conf['name'])
        )
['def', 'fetcher_factory', '(', 'conf', ')', ':', 'global', 'PROMOTERS', 'applicable', '=', '[', ']', 'if', 'not', 'PROMOTERS', ':', 'PROMOTERS', '=', 'load_promoters', '(', ')', 'for', 'promoter', 'in', 'PROMOTERS', ':', 'if', 'promoter', '.', 'is_applicable', '(', 'conf', ')', ':', 'applicable', '.', 'append', '(', '(', 'promoter', '.', 'PRIORITY', ',', 'promoter', ')', ')', 'if', 'applicable', ':', 'best_match', '=', 'sorted', '(', 'applicable', ',', 'reverse', '=', 'True', ')', '[', '0', ']', '[', '1', ']', 'return', 'best_match', '(', 'conf', ')', 'else', ':', 'raise', 'ConfigurationError', '(', '\'No fetcher is applicable for "{0}"\'', '.', 'format', '(', 'conf', '[', "'name'", ']', ')', ')']
Return initialized fetcher capable of processing given conf.
['Return', 'initialized', 'fetcher', 'capable', 'of', 'processing', 'given', 'conf', '.']
train
https://github.com/kibitzr/kibitzr/blob/749da312488f1dda1ed1093cf4c95aaac0a604f7/kibitzr/fetcher/factory.py#L22-L37
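The same highest-priority-plugin selection can be written with max() instead of sorting. A self-contained sketch with toy promoter classes (all names illustrative, not the kibitzr API):

class BasePromoter:
    PRIORITY = 0
    def __init__(self, conf):
        self.conf = conf
    @classmethod
    def is_applicable(cls, conf):
        return True

class UrlPromoter(BasePromoter):
    PRIORITY = 10
    @classmethod
    def is_applicable(cls, conf):
        return 'url' in conf

def pick_plugin(promoters, conf):
    # Highest-PRIORITY applicable promoter wins; max() avoids sorting
    # the whole list just to take one element.
    applicable = [p for p in promoters if p.is_applicable(conf)]
    if not applicable:
        raise LookupError('No fetcher is applicable for "{0}"'.format(conf['name']))
    return max(applicable, key=lambda p: p.PRIORITY)(conf)

assert isinstance(pick_plugin([BasePromoter, UrlPromoter],
                              {'name': 'x', 'url': 'http://example.com'}),
                  UrlPromoter)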
1,229
log2timeline/plaso
plaso/parsers/mediator.py
ParserMediator.SetStorageWriter
def SetStorageWriter(self, storage_writer):
    """Sets the storage writer.

    Args:
      storage_writer (StorageWriter): storage writer.
    """
    self._storage_writer = storage_writer

    # Reset the last event data information. Each storage file should
    # contain event data for their events.
    self._last_event_data_hash = None
    self._last_event_data_identifier = None
python
def SetStorageWriter(self, storage_writer):
    """Sets the storage writer.

    Args:
      storage_writer (StorageWriter): storage writer.
    """
    self._storage_writer = storage_writer

    # Reset the last event data information. Each storage file should
    # contain event data for their events.
    self._last_event_data_hash = None
    self._last_event_data_identifier = None
['def', 'SetStorageWriter', '(', 'self', ',', 'storage_writer', ')', ':', 'self', '.', '_storage_writer', '=', 'storage_writer', '# Reset the last event data information. Each storage file should', '# contain event data for their events.', 'self', '.', '_last_event_data_hash', '=', 'None', 'self', '.', '_last_event_data_identifier', '=', 'None']
Sets the storage writer.

Args:
  storage_writer (StorageWriter): storage writer.
['Sets', 'the', 'storage', 'writer', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/mediator.py#L628-L639
1,230
lekhakpadmanabh/Summarizer
smrzr/core.py
summarize_url
def summarize_url(url, num_sentences=4, fmt='default'):
    '''returns: tuple containing

       * single-line summary candidate
       * key points in the format specified.
    '''
    title, meta, full_text = goose_extractor(url)

    if not full_text:
        raise ArticleExtractionFail("Couldn't extract: {}".format(url))

    its = _intertext_score(full_text)
    tss = _title_similarity_score(full_text, title)

    if _eval_meta_as_summary(meta):
        summ = meta
        if tss[0][2].lower() in summ.lower():
            its, tss = _remove_title_from_tuples(its, tss)
        elif summ.lower() in tss[0][2].lower():
            summ = tss[0][2]
            its, tss = _remove_title_from_tuples(its, tss)
    else:
        summ = tss[0][2]
        its, tss = _remove_title_from_tuples(its, tss)

    scores = [score[2] for score in _aggregrate_scores(its, tss, num_sentences)]
    formatted = Formatter(scores, fmt).frmt()
    return summ, formatted
python
def summarize_url(url, num_sentences=4, fmt='default'):
    '''returns: tuple containing

       * single-line summary candidate
       * key points in the format specified.
    '''
    title, meta, full_text = goose_extractor(url)

    if not full_text:
        raise ArticleExtractionFail("Couldn't extract: {}".format(url))

    its = _intertext_score(full_text)
    tss = _title_similarity_score(full_text, title)

    if _eval_meta_as_summary(meta):
        summ = meta
        if tss[0][2].lower() in summ.lower():
            its, tss = _remove_title_from_tuples(its, tss)
        elif summ.lower() in tss[0][2].lower():
            summ = tss[0][2]
            its, tss = _remove_title_from_tuples(its, tss)
    else:
        summ = tss[0][2]
        its, tss = _remove_title_from_tuples(its, tss)

    scores = [score[2] for score in _aggregrate_scores(its, tss, num_sentences)]
    formatted = Formatter(scores, fmt).frmt()
    return summ, formatted
['def', 'summarize_url', '(', 'url', ',', 'num_sentences', '=', '4', ',', 'fmt', '=', "'default'", ')', ':', 'title', ',', 'meta', ',', 'full_text', '=', 'goose_extractor', '(', 'url', ')', 'if', 'not', 'full_text', ':', 'raise', 'ArticleExtractionFail', '(', '"Couldn\'t extract: {}"', '.', 'format', '(', 'url', ')', ')', 'its', '=', '_intertext_score', '(', 'full_text', ')', 'tss', '=', '_title_similarity_score', '(', 'full_text', ',', 'title', ')', 'if', '_eval_meta_as_summary', '(', 'meta', ')', ':', 'summ', '=', 'meta', 'if', 'tss', '[', '0', ']', '[', '2', ']', '.', 'lower', '(', ')', 'in', 'summ', '.', 'lower', '(', ')', ':', 'its', ',', 'tss', '=', '_remove_title_from_tuples', '(', 'its', ',', 'tss', ')', 'elif', 'summ', '.', 'lower', '(', ')', 'in', 'tss', '[', '0', ']', '[', '2', ']', '.', 'lower', '(', ')', ':', 'summ', '=', 'tss', '[', '0', ']', '[', '2', ']', 'its', ',', 'tss', '=', '_remove_title_from_tuples', '(', 'its', ',', 'tss', ')', 'else', ':', 'summ', '=', 'tss', '[', '0', ']', '[', '2', ']', 'its', ',', 'tss', '=', '_remove_title_from_tuples', '(', 'its', ',', 'tss', ')', 'scores', '=', '[', 'score', '[', '2', ']', 'for', 'score', 'in', '_aggregrate_scores', '(', 'its', ',', 'tss', ',', 'num_sentences', ')', ']', 'formatted', '=', 'Formatter', '(', 'scores', ',', 'fmt', ')', '.', 'frmt', '(', ')', 'return', 'summ', ',', 'formatted']
returns: tuple containing

* single-line summary candidate
* key points in the format specified.
['returns', ':', 'tuple', 'containing', '*', 'single', '-', 'line', 'summary', 'candidate', '*', 'key', 'points', 'in', 'the', 'format', 'specified', '.']
train
https://github.com/lekhakpadmanabh/Summarizer/blob/143456a48217905c720d87331f410e5c8b4e24aa/smrzr/core.py#L128-L156
1,231
JoelBender/bacpypes
py25/bacpypes/netservice.py
NetworkAdapter.process_npdu
def process_npdu(self, npdu):
    """Encode NPDUs from the service access point and send them downstream."""
    if _debug: NetworkAdapter._debug("process_npdu %r (net=%r)", npdu, self.adapterNet)

    pdu = PDU(user_data=npdu.pduUserData)
    npdu.encode(pdu)
    self.request(pdu)
python
def process_npdu(self, npdu):
    """Encode NPDUs from the service access point and send them downstream."""
    if _debug: NetworkAdapter._debug("process_npdu %r (net=%r)", npdu, self.adapterNet)

    pdu = PDU(user_data=npdu.pduUserData)
    npdu.encode(pdu)
    self.request(pdu)
['def', 'process_npdu', '(', 'self', ',', 'npdu', ')', ':', 'if', '_debug', ':', 'NetworkAdapter', '.', '_debug', '(', '"process_npdu %r (net=%r)"', ',', 'npdu', ',', 'self', '.', 'adapterNet', ')', 'pdu', '=', 'PDU', '(', 'user_data', '=', 'npdu', '.', 'pduUserData', ')', 'npdu', '.', 'encode', '(', 'pdu', ')', 'self', '.', 'request', '(', 'pdu', ')']
Encode NPDUs from the service access point and send them downstream.
['Encode', 'NPDUs', 'from', 'the', 'service', 'access', 'point', 'and', 'send', 'them', 'downstream', '.']
train
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/netservice.py#L190-L196
1,232
adrianliaw/PyCuber
pycuber/solver/cfop/cross.py
CrossSolver.cross_state_value
def cross_state_value(state):
    """
    Compute the state value of the cross solving search.
    """
    centres, edges = state
    value = 0
    for edge in edges:
        if "U" in edge:
            if edge["U"] == centres["D"]["D"]:
                value += 1
            else:
                value += 2
        elif "D" in edge:
            if edge["D"] != centres["D"]["D"]:
                value += 3
            else:
                value += 1
    edgeposes = {}
    counts = {f: 0 for f in "LFRB"}
    ngedges = []
    for edge in edges:
        if "U" in edge and edge["U"] == centres["D"]["D"]:
            k = "".join(edge.facings.keys()).replace("U", "")
            edgeposes[k] = edge[k]
            counts[k] += 1
        elif "D" in edge and edge["D"] == centres["D"]["D"]:
            k = "".join(edge.facings.keys()).replace("D", "")
            edgeposes[k] = edge[k]
            counts[k] += 1
        elif "U" in edge or "D" in edge:
            ngedges.append(edge)
        else:
            for k, s in edge:
                if s != centres["D"]["D"]:
                    edgeposes[k] = s
                    counts[k] += 1
                    break
    for edge in ngedges:
        idx = "LFRB".index(edge[centres["D"].colour])
        for i in [-1, 1]:
            if "LFRB"[(idx+1)%4] not in edgeposes:
                k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
                edgeposes["LFRB"[(idx+1)%4]] = edge[k]
                counts["LFRB"[(idx+1)%4]] += 1
                break
        else:
            k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
            if counts["LFRB"[(idx-1)%4]] > counts["LFRB"[(idx+1)%4]]:
                edgeposes["LFRB"[(idx-1)%4]] = edge[k]
            else:
                edgeposes["LFRB"[(idx+1)%4]] = edge[k]
    relative_pos = {f: centres[f][f] for f in "LFRB"}
    if len(edgeposes) == 4:
        for i in range(4):
            edgeposes["L"], edgeposes["F"], edgeposes["R"], edgeposes["B"] = \
                edgeposes["F"], edgeposes["R"], edgeposes["B"], edgeposes["L"]
            if edgeposes == relative_pos:
                break
        else:
            value += 5
    else:
        value += 3
    return value
python
def cross_state_value(state):
    """
    Compute the state value of the cross solving search.
    """
    centres, edges = state
    value = 0
    for edge in edges:
        if "U" in edge:
            if edge["U"] == centres["D"]["D"]:
                value += 1
            else:
                value += 2
        elif "D" in edge:
            if edge["D"] != centres["D"]["D"]:
                value += 3
            else:
                value += 1
    edgeposes = {}
    counts = {f: 0 for f in "LFRB"}
    ngedges = []
    for edge in edges:
        if "U" in edge and edge["U"] == centres["D"]["D"]:
            k = "".join(edge.facings.keys()).replace("U", "")
            edgeposes[k] = edge[k]
            counts[k] += 1
        elif "D" in edge and edge["D"] == centres["D"]["D"]:
            k = "".join(edge.facings.keys()).replace("D", "")
            edgeposes[k] = edge[k]
            counts[k] += 1
        elif "U" in edge or "D" in edge:
            ngedges.append(edge)
        else:
            for k, s in edge:
                if s != centres["D"]["D"]:
                    edgeposes[k] = s
                    counts[k] += 1
                    break
    for edge in ngedges:
        idx = "LFRB".index(edge[centres["D"].colour])
        for i in [-1, 1]:
            if "LFRB"[(idx+1)%4] not in edgeposes:
                k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
                edgeposes["LFRB"[(idx+1)%4]] = edge[k]
                counts["LFRB"[(idx+1)%4]] += 1
                break
        else:
            k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
            if counts["LFRB"[(idx-1)%4]] > counts["LFRB"[(idx+1)%4]]:
                edgeposes["LFRB"[(idx-1)%4]] = edge[k]
            else:
                edgeposes["LFRB"[(idx+1)%4]] = edge[k]
    relative_pos = {f: centres[f][f] for f in "LFRB"}
    if len(edgeposes) == 4:
        for i in range(4):
            edgeposes["L"], edgeposes["F"], edgeposes["R"], edgeposes["B"] = \
                edgeposes["F"], edgeposes["R"], edgeposes["B"], edgeposes["L"]
            if edgeposes == relative_pos:
                break
        else:
            value += 5
    else:
        value += 3
    return value
['def', 'cross_state_value', '(', 'state', ')', ':', 'centres', ',', 'edges', '=', 'state', 'value', '=', '0', 'for', 'edge', 'in', 'edges', ':', 'if', '"U"', 'in', 'edge', ':', 'if', 'edge', '[', '"U"', ']', '==', 'centres', '[', '"D"', ']', '[', '"D"', ']', ':', 'value', '+=', '1', 'else', ':', 'value', '+=', '2', 'elif', '"D"', 'in', 'edge', ':', 'if', 'edge', '[', '"D"', ']', '!=', 'centres', '[', '"D"', ']', '[', '"D"', ']', ':', 'value', '+=', '3', 'else', ':', 'value', '+=', '1', 'edgeposes', '=', '{', '}', 'counts', '=', '{', 'f', ':', '0', 'for', 'f', 'in', '"LFRB"', '}', 'ngedges', '=', '[', ']', 'for', 'edge', 'in', 'edges', ':', 'if', '"U"', 'in', 'edge', 'and', 'edge', '[', '"U"', ']', '==', 'centres', '[', '"D"', ']', '[', '"D"', ']', ':', 'k', '=', '""', '.', 'join', '(', 'edge', '.', 'facings', '.', 'keys', '(', ')', ')', '.', 'replace', '(', '"U"', ',', '""', ')', 'edgeposes', '[', 'k', ']', '=', 'edge', '[', 'k', ']', 'counts', '[', 'k', ']', '+=', '1', 'elif', '"D"', 'in', 'edge', 'and', 'edge', '[', '"D"', ']', '==', 'centres', '[', '"D"', ']', '[', '"D"', ']', ':', 'k', '=', '""', '.', 'join', '(', 'edge', '.', 'facings', '.', 'keys', '(', ')', ')', '.', 'replace', '(', '"D"', ',', '""', ')', 'edgeposes', '[', 'k', ']', '=', 'edge', '[', 'k', ']', 'counts', '[', 'k', ']', '+=', '1', 'elif', '"U"', 'in', 'edge', 'or', '"D"', 'in', 'edge', ':', 'ngedges', '.', 'append', '(', 'edge', ')', 'else', ':', 'for', 'k', ',', 's', 'in', 'edge', ':', 'if', 's', '!=', 'centres', '[', '"D"', ']', '[', '"D"', ']', ':', 'edgeposes', '[', 'k', ']', '=', 's', 'counts', '[', 'k', ']', '+=', '1', 'break', 'for', 'edge', 'in', 'ngedges', ':', 'idx', '=', '"LFRB"', '.', 'index', '(', 'edge', '[', 'centres', '[', '"D"', ']', '.', 'colour', ']', ')', 'for', 'i', 'in', '[', '-', '1', ',', '1', ']', ':', 'if', '"LFRB"', '[', '(', 'idx', '+', '1', ')', '%', '4', ']', 'not', 'in', 'edgeposes', ':', 'k', '=', '""', '.', 'join', '(', 'edge', '.', 'facings', '.', 'keys', '(', ')', ')', '.', 'replace', '(', '"LFRB"', '[', 'idx', ']', ',', '""', ')', 'edgeposes', '[', '"LFRB"', '[', '(', 'idx', '+', '1', ')', '%', '4', ']', ']', '=', 'edge', '[', 'k', ']', 'counts', '[', '"LFRB"', '[', '(', 'idx', '+', '1', ')', '%', '4', ']', ']', '+=', '1', 'break', 'else', ':', 'k', '=', '""', '.', 'join', '(', 'edge', '.', 'facings', '.', 'keys', '(', ')', ')', '.', 'replace', '(', '"LFRB"', '[', 'idx', ']', ',', '""', ')', 'if', 'counts', '[', '"LFRB"', '[', '(', 'idx', '-', '1', ')', '%', '4', ']', ']', '>', 'counts', '[', '"LFRB"', '[', '(', 'idx', '+', '1', ')', '%', '4', ']', ']', ':', 'edgeposes', '[', '"LFRB"', '[', '(', 'idx', '-', '1', ')', '%', '4', ']', ']', '=', 'edge', '[', 'k', ']', 'else', ':', 'edgeposes', '[', '"LFRB"', '[', '(', 'idx', '+', '1', ')', '%', '4', ']', ']', '=', 'edge', '[', 'k', ']', 'relative_pos', '=', '{', 'f', ':', 'centres', '[', 'f', ']', '[', 'f', ']', 'for', 'f', 'in', '"LFRB"', '}', 'if', 'len', '(', 'edgeposes', ')', '==', '4', ':', 'for', 'i', 'in', 'range', '(', '4', ')', ':', 'edgeposes', '[', '"L"', ']', ',', 'edgeposes', '[', '"F"', ']', ',', 'edgeposes', '[', '"R"', ']', ',', 'edgeposes', '[', '"B"', ']', '=', 'edgeposes', '[', '"F"', ']', ',', 'edgeposes', '[', '"R"', ']', ',', 'edgeposes', '[', '"B"', ']', ',', 'edgeposes', '[', '"L"', ']', 'if', 'edgeposes', '==', 'relative_pos', ':', 'break', 'else', ':', 'value', '+=', '5', 'else', ':', 'value', '+=', '3', 'return', 'value']
Compute the state value of the cross solving search.
['Compute', 'the', 'state', 'value', 'of', 'the', 'cross', 'solving', 'search', '.']
train
https://github.com/adrianliaw/PyCuber/blob/e44b5ba48c831b964ce73d046fb813222771853f/pycuber/solver/cfop/cross.py#L82-L144
1,233
oemof/demandlib
demandlib/bdew.py
HeatBuilding.weighted_temperature
def weighted_temperature(self, how='geometric_series'):
    r"""
    A new temperature vector is generated containing a multi-day
    average temperature as needed in the load profile function.

    Parameters
    ----------
    how : string
        string which type to return ("geometric_series" or "mean")

    Notes
    -----
    Equation for the mathematical series of the average temperature [1]_:

    .. math::
        T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+
                0.125\cdot T_{D-3}}{1+0.5+0.25+0.125}

    with :math:`T_D` = Average temperature on the present day
         :math:`T_{D-i}` = Average temperature on the day - i

    References
    ----------
    .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
        BDEW Documentation for heat profiles.
    """
    # calculate daily mean temperature
    temperature = self.df['temperature'].resample('D').mean().reindex(
        self.df.index).fillna(method='ffill').fillna(method='bfill')

    if how == 'geometric_series':
        temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) +
                            0.25 * np.roll(temperature, 48) +
                            0.125 * np.roll(temperature, 72)) / 1.875
    elif how == 'mean':
        temperature_mean = temperature
    else:
        temperature_mean = None

    return temperature_mean
python
def weighted_temperature(self, how='geometric_series'):
    r"""
    A new temperature vector is generated containing a multi-day
    average temperature as needed in the load profile function.

    Parameters
    ----------
    how : string
        string which type to return ("geometric_series" or "mean")

    Notes
    -----
    Equation for the mathematical series of the average temperature [1]_:

    .. math::
        T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+
                0.125\cdot T_{D-3}}{1+0.5+0.25+0.125}

    with :math:`T_D` = Average temperature on the present day
         :math:`T_{D-i}` = Average temperature on the day - i

    References
    ----------
    .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_,
        BDEW Documentation for heat profiles.
    """
    # calculate daily mean temperature
    temperature = self.df['temperature'].resample('D').mean().reindex(
        self.df.index).fillna(method='ffill').fillna(method='bfill')

    if how == 'geometric_series':
        temperature_mean = (temperature + 0.5 * np.roll(temperature, 24) +
                            0.25 * np.roll(temperature, 48) +
                            0.125 * np.roll(temperature, 72)) / 1.875
    elif how == 'mean':
        temperature_mean = temperature
    else:
        temperature_mean = None

    return temperature_mean
['def', 'weighted_temperature', '(', 'self', ',', 'how', '=', "'geometric_series'", ')', ':', '# calculate daily mean temperature', 'temperature', '=', 'self', '.', 'df', '[', "'temperature'", ']', '.', 'resample', '(', "'D'", ')', '.', 'mean', '(', ')', '.', 'reindex', '(', 'self', '.', 'df', '.', 'index', ')', '.', 'fillna', '(', 'method', '=', "'ffill'", ')', '.', 'fillna', '(', 'method', '=', "'bfill'", ')', 'if', 'how', '==', "'geometric_series'", ':', 'temperature_mean', '=', '(', 'temperature', '+', '0.5', '*', 'np', '.', 'roll', '(', 'temperature', ',', '24', ')', '+', '0.25', '*', 'np', '.', 'roll', '(', 'temperature', ',', '48', ')', '+', '0.125', '*', 'np', '.', 'roll', '(', 'temperature', ',', '72', ')', ')', '/', '1.875', 'elif', 'how', '==', "'mean'", ':', 'temperature_mean', '=', 'temperature', 'else', ':', 'temperature_mean', '=', 'None', 'return', 'temperature_mean']
r""" A new temperature vector is generated containing a multi-day average temperature as needed in the load profile function. Parameters ---------- how : string string which type to return ("geometric_series" or "mean") Notes ----- Equation for the mathematical series of the average tempaerature [1]_: .. math:: T=\frac{T_{D}+0.5\cdot T_{D-1}+0.25\cdot T_{D-2}+ 0.125\cdot T_{D-3}}{1+0.5+0.25+0.125} with :math:`T_D` = Average temperature on the present day :math:`T_{D-i}` = Average temperature on the day - i References ---------- .. [1] `BDEW <https://www.avacon.de/cps/rde/xbcr/avacon/15-06-30_Leitfaden_Abwicklung_SLP_Gas.pdf>`_, BDEW Documentation for heat profiles.
['r', 'A', 'new', 'temperature', 'vector', 'is', 'generated', 'containing', 'a', 'multi', '-', 'day', 'average', 'temperature', 'as', 'needed', 'in', 'the', 'load', 'profile', 'function', '.']
train
https://github.com/oemof/demandlib/blob/4b62d60e05cb06eb2590f9c5655c2cdebf494080/demandlib/bdew.py#L178-L219
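A runnable illustration of the geometric series on synthetic data; note that np.roll wraps around, so the first days of the series borrow values from its end, exactly as in the method above:

import numpy as np
import pandas as pd

# Hourly index over four days with a synthetic temperature curve.
index = pd.date_range("2020-01-01", periods=96, freq="H")
df = pd.DataFrame({"temperature": np.linspace(-5.0, 5.0, 96)}, index=index)

# Daily means broadcast back to hourly resolution, then the series
# T = (T_D + 0.5*T_(D-1) + 0.25*T_(D-2) + 0.125*T_(D-3)) / 1.875;
# a shift of 24 positions is one day at hourly resolution.
temperature = (df["temperature"].resample("D").mean()
               .reindex(df.index).ffill().bfill())
temperature_mean = (temperature
                    + 0.5 * np.roll(temperature, 24)
                    + 0.25 * np.roll(temperature, 48)
                    + 0.125 * np.roll(temperature, 72)) / 1.875
print(temperature_mean.head())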
1,234
mabuchilab/QNET
src/qnet/printing/sreprprinter.py
IndentedSReprPrinter.emptyPrinter
def emptyPrinter(self, expr):
    """Fallback printer"""
    indent_str = " " * (self._print_level - 1)
    lines = []
    if isinstance(expr.__class__, Singleton):
        # We exploit that Singletons override __expr__ to directly return
        # their name
        return indent_str + repr(expr)
    if isinstance(expr, Expression):
        args = expr.args
        keys = expr.minimal_kwargs.keys()
        lines.append(indent_str + expr.__class__.__name__ + "(")
        for arg in args:
            lines.append(self.doprint(arg) + ",")
        for key in keys:
            arg = expr.kwargs[key]
            lines.append(
                (" " * self._print_level) + key + '=' +
                self.doprint(arg).lstrip() + ",")
        if len(args) > 0 or len(keys) > 0:
            lines[-1] = lines[-1][:-1]  # drop trailing comma for last arg
        lines[-1] += ")"
    elif isinstance(expr, (tuple, list)):
        delims = ("(", ")") if isinstance(expr, tuple) else ("[", "]")
        if len(expr) == 1:
            delims = (delims[0], "," + delims[1])
        lines.append(
            indent_str + delims[0] +
            ", ".join([render_head_repr(v) for v in expr]) +
            delims[1])
    else:
        lines.append(indent_str + SympyReprPrinter().doprint(expr))
    return "\n".join(lines)
python
def emptyPrinter(self, expr):
    """Fallback printer"""
    indent_str = " " * (self._print_level - 1)
    lines = []
    if isinstance(expr.__class__, Singleton):
        # We exploit that Singletons override __expr__ to directly return
        # their name
        return indent_str + repr(expr)
    if isinstance(expr, Expression):
        args = expr.args
        keys = expr.minimal_kwargs.keys()
        lines.append(indent_str + expr.__class__.__name__ + "(")
        for arg in args:
            lines.append(self.doprint(arg) + ",")
        for key in keys:
            arg = expr.kwargs[key]
            lines.append(
                (" " * self._print_level) + key + '=' +
                self.doprint(arg).lstrip() + ",")
        if len(args) > 0 or len(keys) > 0:
            lines[-1] = lines[-1][:-1]  # drop trailing comma for last arg
        lines[-1] += ")"
    elif isinstance(expr, (tuple, list)):
        delims = ("(", ")") if isinstance(expr, tuple) else ("[", "]")
        if len(expr) == 1:
            delims = (delims[0], "," + delims[1])
        lines.append(
            indent_str + delims[0] +
            ", ".join([render_head_repr(v) for v in expr]) +
            delims[1])
    else:
        lines.append(indent_str + SympyReprPrinter().doprint(expr))
    return "\n".join(lines)
['def', 'emptyPrinter', '(', 'self', ',', 'expr', ')', ':', 'indent_str', '=', '" "', '*', '(', 'self', '.', '_print_level', '-', '1', ')', 'lines', '=', '[', ']', 'if', 'isinstance', '(', 'expr', '.', '__class__', ',', 'Singleton', ')', ':', '# We exploit that Singletons override __expr__ to directly return', '# their name', 'return', 'indent_str', '+', 'repr', '(', 'expr', ')', 'if', 'isinstance', '(', 'expr', ',', 'Expression', ')', ':', 'args', '=', 'expr', '.', 'args', 'keys', '=', 'expr', '.', 'minimal_kwargs', '.', 'keys', '(', ')', 'lines', '.', 'append', '(', 'indent_str', '+', 'expr', '.', '__class__', '.', '__name__', '+', '"("', ')', 'for', 'arg', 'in', 'args', ':', 'lines', '.', 'append', '(', 'self', '.', 'doprint', '(', 'arg', ')', '+', '","', ')', 'for', 'key', 'in', 'keys', ':', 'arg', '=', 'expr', '.', 'kwargs', '[', 'key', ']', 'lines', '.', 'append', '(', '(', '" "', '*', 'self', '.', '_print_level', ')', '+', 'key', '+', "'='", '+', 'self', '.', 'doprint', '(', 'arg', ')', '.', 'lstrip', '(', ')', '+', '","', ')', 'if', 'len', '(', 'args', ')', '>', '0', 'or', 'len', '(', 'keys', ')', '>', '0', ':', 'lines', '[', '-', '1', ']', '=', 'lines', '[', '-', '1', ']', '[', ':', '-', '1', ']', '# drop trailing comma for last arg', 'lines', '[', '-', '1', ']', '+=', '")"', 'elif', 'isinstance', '(', 'expr', ',', '(', 'tuple', ',', 'list', ')', ')', ':', 'delims', '=', '(', '"("', ',', '")"', ')', 'if', 'isinstance', '(', 'expr', ',', 'tuple', ')', 'else', '(', '"["', ',', '"]"', ')', 'if', 'len', '(', 'expr', ')', '==', '1', ':', 'delims', '=', '(', 'delims', '[', '0', ']', ',', '","', '+', 'delims', '[', '1', ']', ')', 'lines', '.', 'append', '(', 'indent_str', '+', 'delims', '[', '0', ']', '+', '", "', '.', 'join', '(', '[', 'render_head_repr', '(', 'v', ')', 'for', 'v', 'in', 'expr', ']', ')', '+', 'delims', '[', '1', ']', ')', 'else', ':', 'lines', '.', 'append', '(', 'indent_str', '+', 'SympyReprPrinter', '(', ')', '.', 'doprint', '(', 'expr', ')', ')', 'return', '"\\n"', '.', 'join', '(', 'lines', ')']
Fallback printer
['Fallback', 'printer']
train
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/printing/sreprprinter.py#L78-L110
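The core idea, indentation that deepens with the recursion level, fits in a few lines. A minimal stand-alone analogue for nested tuples and lists (illustrative only, not the QNET printer):

def indented_repr(obj, level=0):
    # Nested tuples/lists are printed one element per line, each level
    # indented one step further; everything else falls back to repr().
    indent = "    " * level
    if isinstance(obj, (tuple, list)):
        open_, close = ("(", ")") if isinstance(obj, tuple) else ("[", "]")
        inner = ",\n".join(indented_repr(v, level + 1) for v in obj)
        return indent + open_ + "\n" + inner + "\n" + indent + close
    return indent + repr(obj)

print(indented_repr((1, [2, 3], "x")))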
1,235
apple/turicreate
src/unity/python/turicreate/extensions.py
_class_instance_from_name
def _class_instance_from_name(class_name, *arg, **kwarg):
    """
    class_name is of the form modA.modB.modC.class
    module_path splits on "." and the import_path is then ['modA','modB','modC']
    the __import__ call is really annoying but essentially it reads like:
    import class from modA.modB.modC
    - Then the module variable points to modC
    - Then you get the class from the module.
    """
    # we first look in tc.extensions for the class name
    module_path = class_name.split('.')
    import_path = module_path[0:-1]
    module = __import__('.'.join(import_path), fromlist=[module_path[-1]])
    class_ = getattr(module, module_path[-1])
    instance = class_(*arg, **kwarg)
    return instance
python
def _class_instance_from_name(class_name, *arg, **kwarg):
    """
    class_name is of the form modA.modB.modC.class
    module_path splits on "." and the import_path is then ['modA','modB','modC']
    the __import__ call is really annoying but essentially it reads like:
    import class from modA.modB.modC
    - Then the module variable points to modC
    - Then you get the class from the module.
    """
    # we first look in tc.extensions for the class name
    module_path = class_name.split('.')
    import_path = module_path[0:-1]
    module = __import__('.'.join(import_path), fromlist=[module_path[-1]])
    class_ = getattr(module, module_path[-1])
    instance = class_(*arg, **kwarg)
    return instance
['def', '_class_instance_from_name', '(', 'class_name', ',', '*', 'arg', ',', '*', '*', 'kwarg', ')', ':', '# we first look in tc.extensions for the class name', 'module_path', '=', 'class_name', '.', 'split', '(', "'.'", ')', 'import_path', '=', 'module_path', '[', '0', ':', '-', '1', ']', 'module', '=', '__import__', '(', "'.'", '.', 'join', '(', 'import_path', ')', ',', 'fromlist', '=', '[', 'module_path', '[', '-', '1', ']', ']', ')', 'class_', '=', 'getattr', '(', 'module', ',', 'module_path', '[', '-', '1', ']', ')', 'instance', '=', 'class_', '(', '*', 'arg', ',', '*', '*', 'kwarg', ')', 'return', 'instance']
class_name is of the form modA.modB.modC.class
module_path splits on "." and the import_path is then ['modA','modB','modC']
the __import__ call is really annoying but essentially it reads like:
import class from modA.modB.modC
- Then the module variable points to modC
- Then you get the class from the module.
['class_name', 'is', 'of', 'the', 'form', 'modA', '.', 'modB', '.', 'modC', '.', 'class', 'module_path', 'splits', 'on', '.', 'and', 'the', 'import_path', 'is', 'then', '[', 'modA', 'modB', 'modC', ']', 'the', '__import__', 'call', 'is', 'really', 'annoying', 'but', 'essentially', 'it', 'reads', 'like', ':']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/extensions.py#L172-L190
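The fromlist trick is what makes __import__ return the leaf module rather than the package root. A runnable demonstration against the standard library:

# With a non-empty fromlist, __import__ returns the leaf module.
module = __import__("collections.abc", fromlist=["Mapping"])
assert module.__name__ == "collections.abc"

# Reproducing the helper against a stdlib class:
def class_instance_from_name(class_name, *args, **kwargs):
    module_path = class_name.split(".")
    module = __import__(".".join(module_path[:-1]), fromlist=[module_path[-1]])
    return getattr(module, module_path[-1])(*args, **kwargs)

d = class_instance_from_name("collections.OrderedDict", [("a", 1)])
assert d["a"] == 1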
1,236
elastic/elasticsearch-py
elasticsearch/client/xpack/rollup.py
RollupClient.get_jobs
def get_jobs(self, id=None, params=None):
    """
    `<>`_

    :arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
        blank for all jobs
    """
    return self.transport.perform_request(
        "GET", _make_path("_rollup", "job", id), params=params
    )
python
def get_jobs(self, id=None, params=None):
    """
    `<>`_

    :arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
        blank for all jobs
    """
    return self.transport.perform_request(
        "GET", _make_path("_rollup", "job", id), params=params
    )
['def', 'get_jobs', '(', 'self', ',', 'id', '=', 'None', ',', 'params', '=', 'None', ')', ':', 'return', 'self', '.', 'transport', '.', 'perform_request', '(', '"GET"', ',', '_make_path', '(', '"_rollup"', ',', '"job"', ',', 'id', ')', ',', 'params', '=', 'params', ')']
`<>`_

:arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left
    blank for all jobs
['<', '>', '_']
train
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/rollup.py#L19-L28
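The URL helper presumably joins the non-None segments, which is what lets the same call site serve both "all jobs" and "one job". A sketch of that assumed behaviour (the real _make_path may quote characters differently):

from urllib.parse import quote

def make_path(*parts):
    # Skip None parts, percent-encode the rest.
    return "/" + "/".join(quote(str(p), safe="") for p in parts if p is not None)

assert make_path("_rollup", "job", None) == "/_rollup/job"
assert make_path("_rollup", "job", "sensor-*") == "/_rollup/job/sensor-%2A"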
1,237
scrapinghub/kafka-scanner
kafka_scanner/msg_processor_handlers.py
MsgProcessorHandlers.decompress_messages
def decompress_messages(self, partitions_offmsgs):
    """ Decompress pre-defined compressed fields for each message. """

    for pomsg in partitions_offmsgs:
        if pomsg['message']:
            pomsg['message'] = self.decompress_fun(pomsg['message'])
        yield pomsg
python
def decompress_messages(self, partitions_offmsgs):
    """ Decompress pre-defined compressed fields for each message. """

    for pomsg in partitions_offmsgs:
        if pomsg['message']:
            pomsg['message'] = self.decompress_fun(pomsg['message'])
        yield pomsg
['def', 'decompress_messages', '(', 'self', ',', 'partitions_offmsgs', ')', ':', 'for', 'pomsg', 'in', 'partitions_offmsgs', ':', 'if', 'pomsg', '[', "'message'", ']', ':', 'pomsg', '[', "'message'", ']', '=', 'self', '.', 'decompress_fun', '(', 'pomsg', '[', "'message'", ']', ')', 'yield', 'pomsg']
Decompress pre-defined compressed fields for each message.
['Decompress', 'pre', '-', 'defined', 'compressed', 'fields', 'for', 'each', 'message', '.']
train
https://github.com/scrapinghub/kafka-scanner/blob/8a71901012e8c948180f70a485b57f8d2e7e3ec1/kafka_scanner/msg_processor_handlers.py#L85-L91
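Because this is a generator, messages are decompressed lazily, one record at a time. A self-contained version using zlib as the decompression function:

import zlib

def decompress_messages(records, decompress_fun=zlib.decompress):
    # Only non-empty payloads are decompressed; records are yielded
    # one by one, so memory use stays constant.
    for record in records:
        if record['message']:
            record['message'] = decompress_fun(record['message'])
        yield record

packed = [{'message': zlib.compress(b'hello')}, {'message': b''}]
out = list(decompress_messages(packed))
assert out[0]['message'] == b'hello' and out[1]['message'] == b''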
1,238
pandas-dev/pandas
pandas/core/indexing.py
_LocIndexer._get_partial_string_timestamp_match_key
def _get_partial_string_timestamp_match_key(self, key, labels):
    """Translate any partial string timestamp matches in key, returning the
    new key (GH 10331)"""
    if isinstance(labels, MultiIndex):
        if (isinstance(key, str) and labels.levels[0].is_all_dates):
            # Convert key '2016-01-01' to
            # ('2016-01-01'[, slice(None, None, None)]+)
            key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))

        if isinstance(key, tuple):
            # Convert (..., '2016-01-01', ...) in tuple to
            # (..., slice('2016-01-01', '2016-01-01', None), ...)
            new_key = []
            for i, component in enumerate(key):
                if (isinstance(component, str) and
                        labels.levels[i].is_all_dates):
                    new_key.append(slice(component, component, None))
                else:
                    new_key.append(component)
            key = tuple(new_key)

    return key
python
def _get_partial_string_timestamp_match_key(self, key, labels):
    """Translate any partial string timestamp matches in key, returning the
    new key (GH 10331)"""
    if isinstance(labels, MultiIndex):
        if (isinstance(key, str) and labels.levels[0].is_all_dates):
            # Convert key '2016-01-01' to
            # ('2016-01-01'[, slice(None, None, None)]+)
            key = tuple([key] + [slice(None)] * (len(labels.levels) - 1))

        if isinstance(key, tuple):
            # Convert (..., '2016-01-01', ...) in tuple to
            # (..., slice('2016-01-01', '2016-01-01', None), ...)
            new_key = []
            for i, component in enumerate(key):
                if (isinstance(component, str) and
                        labels.levels[i].is_all_dates):
                    new_key.append(slice(component, component, None))
                else:
                    new_key.append(component)
            key = tuple(new_key)

    return key
['def', '_get_partial_string_timestamp_match_key', '(', 'self', ',', 'key', ',', 'labels', ')', ':', 'if', 'isinstance', '(', 'labels', ',', 'MultiIndex', ')', ':', 'if', '(', 'isinstance', '(', 'key', ',', 'str', ')', 'and', 'labels', '.', 'levels', '[', '0', ']', '.', 'is_all_dates', ')', ':', "# Convert key '2016-01-01' to", "# ('2016-01-01'[, slice(None, None, None)]+)", 'key', '=', 'tuple', '(', '[', 'key', ']', '+', '[', 'slice', '(', 'None', ')', ']', '*', '(', 'len', '(', 'labels', '.', 'levels', ')', '-', '1', ')', ')', 'if', 'isinstance', '(', 'key', ',', 'tuple', ')', ':', "# Convert (..., '2016-01-01', ...) in tuple to", "# (..., slice('2016-01-01', '2016-01-01', None), ...)", 'new_key', '=', '[', ']', 'for', 'i', ',', 'component', 'in', 'enumerate', '(', 'key', ')', ':', 'if', '(', 'isinstance', '(', 'component', ',', 'str', ')', 'and', 'labels', '.', 'levels', '[', 'i', ']', '.', 'is_all_dates', ')', ':', 'new_key', '.', 'append', '(', 'slice', '(', 'component', ',', 'component', ',', 'None', ')', ')', 'else', ':', 'new_key', '.', 'append', '(', 'component', ')', 'key', '=', 'tuple', '(', 'new_key', ')', 'return', 'key']
Translate any partial string timestamp matches in key, returning the new key (GH 10331)
['Translate', 'any', 'partial', 'string', 'timestamp', 'matches', 'in', 'key', 'returning', 'the', 'new', 'key', '(', 'GH', '10331', ')']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexing.py#L1835-L1856
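A short sketch of the behavior this helper enables, partial string timestamp indexing on a MultiIndex (GH 10331); the series below is made up:

import pandas as pd

idx = pd.MultiIndex.from_product(
    [pd.date_range('2016-01-01', periods=2), ['a', 'b']])
s = pd.Series(range(4), index=idx)
# '2016-01-01' is expanded to ('2016-01-01', slice(None)); the string
# component then becomes slice('2016-01-01', '2016-01-01', None) for lookup.
print(s.loc['2016-01-01'])  # selects the two rows under the first date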
1,239
haikuginger/beekeeper
beekeeper/api.py
Action.printed_out
def printed_out(self, name): """ Create a string representation of the action """ opt = self.variables().optional_namestring() req = self.variables().required_namestring() out = '' out += '| |\n' out += '| |---{}({}{})\n'.format(name, req, opt) if self.description: out += '| | {}\n'.format(self.description) return out
python
def printed_out(self, name): """ Create a string representation of the action """ opt = self.variables().optional_namestring() req = self.variables().required_namestring() out = '' out += '| |\n' out += '| |---{}({}{})\n'.format(name, req, opt) if self.description: out += '| | {}\n'.format(self.description) return out
['def', 'printed_out', '(', 'self', ',', 'name', ')', ':', 'opt', '=', 'self', '.', 'variables', '(', ')', '.', 'optional_namestring', '(', ')', 'req', '=', 'self', '.', 'variables', '(', ')', '.', 'required_namestring', '(', ')', 'out', '=', "''", 'out', '+=', "'| |\\n'", 'out', '+=', "'| |---{}({}{})\\n'", '.', 'format', '(', 'name', ',', 'req', ',', 'opt', ')', 'if', 'self', '.', 'description', ':', 'out', '+=', "'| | {}\\n'", '.', 'format', '(', 'self', '.', 'description', ')', 'return', 'out']
Create a string representation of the action
['Create', 'a', 'string', 'representation', 'of', 'the', 'action']
train
https://github.com/haikuginger/beekeeper/blob/b647d3add0b407ec5dc3a2a39c4f6dac31243b18/beekeeper/api.py#L208-L219
1,240
robinandeer/puzzle
puzzle/plugins/sql/mixins/actions/gemini.py
GeminiActions.add_gemini_query
def add_gemini_query(self, name, query): """Add a user defined gemini query Args: name (str) query (str) """ logger.info("Adding query {0} with text {1}".format(name, query)) new_query = GeminiQuery(name=name, query=query) self.session.add(new_query) self.save() return new_query
python
def add_gemini_query(self, name, query): """Add a user defined gemini query Args: name (str) query (str) """ logger.info("Adding query {0} with text {1}".format(name, query)) new_query = GeminiQuery(name=name, query=query) self.session.add(new_query) self.save() return new_query
['def', 'add_gemini_query', '(', 'self', ',', 'name', ',', 'query', ')', ':', 'logger', '.', 'info', '(', '"Adding query {0} with text {1}"', '.', 'format', '(', 'name', ',', 'query', ')', ')', 'new_query', '=', 'GeminiQuery', '(', 'name', '=', 'name', ',', 'query', '=', 'query', ')', 'self', '.', 'session', '.', 'add', '(', 'new_query', ')', 'self', '.', 'save', '(', ')', 'return', 'new_query']
Add a user defined gemini query Args: name (str) query (str)
['Add', 'a', 'user', 'defined', 'gemini', 'query']
train
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/sql/mixins/actions/gemini.py#L23-L34
1,241
inveniosoftware-attic/invenio-upgrader
invenio_upgrader/engine.py
UpgradeBase.repository
def repository(self): """Repository.""" m = re.match("(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", self.__module__) if m: return m.group(1) m = re.match("(.+)(_release_)(.+)", self.__module__) if m: return m.group(1)
python
def repository(self): """Repository.""" m = re.match("(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", self.__module__) if m: return m.group(1) m = re.match("(.+)(_release_)(.+)", self.__module__) if m: return m.group(1)
['def', 'repository', '(', 'self', ')', ':', 'm', '=', 're', '.', 'match', '(', '"(.+)(_\\d{4}_\\d{2}_\\d{2}_)(.+)"', ',', 'self', '.', '__module__', ')', 'if', 'm', ':', 'return', 'm', '.', 'group', '(', '1', ')', 'm', '=', 're', '.', 'match', '(', '"(.+)(_release_)(.+)"', ',', 'self', '.', '__module__', ')', 'if', 'm', ':', 'return', 'm', '.', 'group', '(', '1', ')']
Repository.
['Repository', '.']
train
https://github.com/inveniosoftware-attic/invenio-upgrader/blob/cee4bcb118515463ecf6de1421642007f79a9fcd/invenio_upgrader/engine.py#L507-L514
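A quick illustration of the two module-name patterns the property matches; the module names here are invented:

import re

for module in ('invenio_records_2015_03_12_new_field',
               'invenio_upgrader_release_1_0'):
    m = (re.match(r"(.+)(_\d{4}_\d{2}_\d{2}_)(.+)", module)
         or re.match(r"(.+)(_release_)(.+)", module))
    print(module, '->', m.group(1))
# invenio_records_2015_03_12_new_field -> invenio_records
# invenio_upgrader_release_1_0 -> invenio_upgrader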
1,242
apache/incubator-superset
superset/models/core.py
Database.all_table_names_in_schema
def all_table_names_in_schema(self, schema, cache=False, cache_timeout=None, force=False): """Parameters need to be passed as keyword arguments. For unused parameters, they are referenced in cache_util.memoized_func decorator. :param schema: schema name :type schema: str :param cache: whether cache is enabled for the function :type cache: bool :param cache_timeout: timeout in seconds for the cache :type cache_timeout: int :param force: whether to force refresh the cache :type force: bool :return: table list :rtype: list """ tables = [] try: tables = self.db_engine_spec.get_table_names( inspector=self.inspector, schema=schema) except Exception as e: logging.exception(e) return tables
python
def all_table_names_in_schema(self, schema, cache=False, cache_timeout=None, force=False): """Parameters need to be passed as keyword arguments. For unused parameters, they are referenced in cache_util.memoized_func decorator. :param schema: schema name :type schema: str :param cache: whether cache is enabled for the function :type cache: bool :param cache_timeout: timeout in seconds for the cache :type cache_timeout: int :param force: whether to force refresh the cache :type force: bool :return: table list :rtype: list """ tables = [] try: tables = self.db_engine_spec.get_table_names( inspector=self.inspector, schema=schema) except Exception as e: logging.exception(e) return tables
['def', 'all_table_names_in_schema', '(', 'self', ',', 'schema', ',', 'cache', '=', 'False', ',', 'cache_timeout', '=', 'None', ',', 'force', '=', 'False', ')', ':', 'tables', '=', '[', ']', 'try', ':', 'tables', '=', 'self', '.', 'db_engine_spec', '.', 'get_table_names', '(', 'inspector', '=', 'self', '.', 'inspector', ',', 'schema', '=', 'schema', ')', 'except', 'Exception', 'as', 'e', ':', 'logging', '.', 'exception', '(', 'e', ')', 'return', 'tables']
Parameters need to be passed as keyword arguments. For unused parameters, they are referenced in cache_util.memoized_func decorator. :param schema: schema name :type schema: str :param cache: whether cache is enabled for the function :type cache: bool :param cache_timeout: timeout in seconds for the cache :type cache_timeout: int :param force: whether to force refresh the cache :type force: bool :return: table list :rtype: list
['Parameters', 'need', 'to', 'be', 'passed', 'as', 'keyword', 'arguments', '.']
train
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/core.py#L954-L978
1,243
erinxocon/spotify-local
src/spotify_local/core.py
SpotifyLocal._request
def _request(self, url, params={}): """Makes a request using the currently open session. :param url: A url fragment to use in the creation of the master url """ r = self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN) return r
python
def _request(self, url, params={}): """Makes a request using the currently open session. :param url: A url fragment to use in the creation of the master url """ r = self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN) return r
['def', '_request', '(', 'self', ',', 'url', ',', 'params', '=', '{', '}', ')', ':', 'r', '=', 'self', '.', '_session', '.', 'get', '(', 'url', '=', 'url', ',', 'params', '=', 'params', ',', 'headers', '=', 'DEFAULT_ORIGIN', ')', 'return', 'r']
Makes a request using the currently open session. :param url: A url fragment to use in the creation of the master url
['Makes', 'a', 'request', 'using', 'the', 'currently', 'open', 'session', '.']
train
https://github.com/erinxocon/spotify-local/blob/8188eef221e3d8b9f408ff430d80e74560360459/src/spotify_local/core.py#L29-L35
1,244
pandas-dev/pandas
pandas/core/indexes/interval.py
_is_valid_endpoint
def _is_valid_endpoint(endpoint): """helper for interval_range to check if start/end are valid types""" return any([is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None])
python
def _is_valid_endpoint(endpoint): """helper for interval_range to check if start/end are valid types""" return any([is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None])
['def', '_is_valid_endpoint', '(', 'endpoint', ')', ':', 'return', 'any', '(', '[', 'is_number', '(', 'endpoint', ')', ',', 'isinstance', '(', 'endpoint', ',', 'Timestamp', ')', ',', 'isinstance', '(', 'endpoint', ',', 'Timedelta', ')', ',', 'endpoint', 'is', 'None', ']', ')']
helper for interval_range to check if start/end are valid types
['helper', 'for', 'interval_range', 'to', 'check', 'if', 'start', '/', 'end', 'are', 'valid', 'types']
train
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/interval.py#L1138-L1143
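For context, a sketch of the public API this helper guards: interval_range accepts numeric, Timestamp, or Timedelta endpoints (or None) and rejects anything else:

import pandas as pd

print(pd.interval_range(start=0, end=4))              # numeric endpoints
print(pd.interval_range(start=pd.Timestamp('2019-01-01'),
                        periods=3))                   # Timestamp endpoint, daily freq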
1,245
tensorflow/mesh
mesh_tensorflow/auto_mtf/layout.py
layout
def layout(mtf_graph, mesh_shape, mtf_outputs=()): """Compute layout rules based on a computational graph and mesh shape. Args: mtf_graph: a mtf.Graph. mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension. mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs of the computation. Returns: a mtf.LayoutRules """ mesh_shape = mtf.convert_to_shape(mesh_shape) estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape, mtf_outputs) optimizer = layout_optimizer.LayoutOptimizer(estimator) return mtf.convert_to_layout_rules(optimizer.solve())
python
def layout(mtf_graph, mesh_shape, mtf_outputs=()): """Compute layout rules based on a computational graph and mesh shape. Args: mtf_graph: a mtf.Graph. mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension. mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs of the computation. Returns: a mtf.LayoutRules """ mesh_shape = mtf.convert_to_shape(mesh_shape) estimator = memory_estimator.MemoryEstimator(mtf_graph, mesh_shape, mtf_outputs) optimizer = layout_optimizer.LayoutOptimizer(estimator) return mtf.convert_to_layout_rules(optimizer.solve())
['def', 'layout', '(', 'mtf_graph', ',', 'mesh_shape', ',', 'mtf_outputs', '=', '(', ')', ')', ':', 'mesh_shape', '=', 'mtf', '.', 'convert_to_shape', '(', 'mesh_shape', ')', 'estimator', '=', 'memory_estimator', '.', 'MemoryEstimator', '(', 'mtf_graph', ',', 'mesh_shape', ',', 'mtf_outputs', ')', 'optimizer', '=', 'layout_optimizer', '.', 'LayoutOptimizer', '(', 'estimator', ')', 'return', 'mtf', '.', 'convert_to_layout_rules', '(', 'optimizer', '.', 'solve', '(', ')', ')']
Compute layout rules based on a computational graph and mesh shape. Args: mtf_graph: a mtf.Graph. mesh_shape: an mtf.Shape, str, or listlike of mtf.Dimension. mtf_outputs: an optional iterable of mtf.Tensor, representing the outputs of the computation. Returns: a mtf.LayoutRules
['Compute', 'layout', 'rules', 'based', 'on', 'a', 'computational', 'graph', 'and', 'mesh', 'shape', '.']
train
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/auto_mtf/layout.py#L47-L63
1,246
ejeschke/ginga
ginga/canvas/types/astro.py
Annulus.sync_state
def sync_state(self): """Called to synchronize state (e.g. when parameters have changed). """ oradius = self.radius + self.width if oradius < self.radius: raise ValueError('Outer boundary < inner boundary') d = dict(points=self.points, radius=self.radius, color=self.color, linewidth=self.linewidth, linestyle=self.linestyle, alpha=self.alpha) # update inner object self.objects[0].__dict__.update(d) # update outer object d['radius'] = oradius self.objects[1].__dict__.update(d)
python
def sync_state(self): """Called to synchronize state (e.g. when parameters have changed). """ oradius = self.radius + self.width if oradius < self.radius: raise ValueError('Outer boundary < inner boundary') d = dict(points=self.points, radius=self.radius, color=self.color, linewidth=self.linewidth, linestyle=self.linestyle, alpha=self.alpha) # update inner object self.objects[0].__dict__.update(d) # update outer object d['radius'] = oradius self.objects[1].__dict__.update(d)
['def', 'sync_state', '(', 'self', ')', ':', 'oradius', '=', 'self', '.', 'radius', '+', 'self', '.', 'width', 'if', 'oradius', '<', 'self', '.', 'radius', ':', 'raise', 'ValueError', '(', "'Outer boundary < inner boundary'", ')', 'd', '=', 'dict', '(', 'points', '=', 'self', '.', 'points', ',', 'radius', '=', 'self', '.', 'radius', ',', 'color', '=', 'self', '.', 'color', ',', 'linewidth', '=', 'self', '.', 'linewidth', ',', 'linestyle', '=', 'self', '.', 'linestyle', ',', 'alpha', '=', 'self', '.', 'alpha', ')', '# update inner object', 'self', '.', 'objects', '[', '0', ']', '.', '__dict__', '.', 'update', '(', 'd', ')', '# update outer object', 'd', '[', "'radius'", ']', '=', 'oradius', 'self', '.', 'objects', '[', '1', ']', '.', '__dict__', '.', 'update', '(', 'd', ')']
Called to synchronize state (e.g. when parameters have changed).
['Called', 'to', 'synchronize', 'state', '(', 'e', '.', 'g', '.', 'when', 'parameters', 'have', 'changed', ')', '.']
train
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/canvas/types/astro.py#L713-L729
1,247
davebridges/mousedb
mousedb/animal/views.py
breeding_wean
def breeding_wean(request, breeding_id): """This view is used to generate a form by which to wean pups which belong to a particular breeding set. This view typically is used to wean existing pups. This includes the MouseID, Cage, Markings, Gender and Wean Date fields. For other fields use the breeding-change page. It takes a request in the form /breeding/(breeding_id)/wean/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage. This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/. This view is restricted to those with the permission animal.change_animal. """ breeding = Breeding.objects.get(id=breeding_id) strain = breeding.Strain PupsFormSet = inlineformset_factory(Breeding, Animal, extra=0, exclude=('Alive','Father', 'Mother', 'Breeding', 'Notes','Rack','Rack_Position','Strain','Background','Genotype','Death','Cause_of_Death','Backcross','Generation')) if request.method =="POST": formset = PupsFormSet(request.POST, instance=breeding, queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True)) if formset.is_valid(): formset.save() return HttpResponseRedirect( breeding.get_absolute_url() ) else: formset = PupsFormSet(instance=breeding, queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True)) return render(request, "breeding_wean.html", {"formset":formset, 'breeding':breeding})
python
def breeding_wean(request, breeding_id): """This view is used to generate a form by which to wean pups which belong to a particular breeding set. This view typically is used to wean existing pups. This includes the MouseID, Cage, Markings, Gender and Wean Date fields. For other fields use the breeding-change page. It takes a request in the form /breeding/(breeding_id)/wean/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage. This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/. This view is restricted to those with the permission animal.change_animal. """ breeding = Breeding.objects.get(id=breeding_id) strain = breeding.Strain PupsFormSet = inlineformset_factory(Breeding, Animal, extra=0, exclude=('Alive','Father', 'Mother', 'Breeding', 'Notes','Rack','Rack_Position','Strain','Background','Genotype','Death','Cause_of_Death','Backcross','Generation')) if request.method =="POST": formset = PupsFormSet(request.POST, instance=breeding, queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True)) if formset.is_valid(): formset.save() return HttpResponseRedirect( breeding.get_absolute_url() ) else: formset = PupsFormSet(instance=breeding, queryset=Animal.objects.filter(Alive=True, Weaned__isnull=True)) return render(request, "breeding_wean.html", {"formset":formset, 'breeding':breeding})
['def', 'breeding_wean', '(', 'request', ',', 'breeding_id', ')', ':', 'breeding', '=', 'Breeding', '.', 'objects', '.', 'get', '(', 'id', '=', 'breeding_id', ')', 'strain', '=', 'breeding', '.', 'Strain', 'PupsFormSet', '=', 'inlineformset_factory', '(', 'Breeding', ',', 'Animal', ',', 'extra', '=', '0', ',', 'exclude', '=', '(', "'Alive'", ',', "'Father'", ',', "'Mother'", ',', "'Breeding'", ',', "'Notes'", ',', "'Rack'", ',', "'Rack_Position'", ',', "'Strain'", ',', "'Background'", ',', "'Genotype'", ',', "'Death'", ',', "'Cause_of_Death'", ',', "'Backcross'", ',', "'Generation'", ')', ')', 'if', 'request', '.', 'method', '==', '"POST"', ':', 'formset', '=', 'PupsFormSet', '(', 'request', '.', 'POST', ',', 'instance', '=', 'breeding', ',', 'queryset', '=', 'Animal', '.', 'objects', '.', 'filter', '(', 'Alive', '=', 'True', ',', 'Weaned__isnull', '=', 'True', ')', ')', 'if', 'formset', '.', 'is_valid', '(', ')', ':', 'formset', '.', 'save', '(', ')', 'return', 'HttpResponseRedirect', '(', 'breeding', '.', 'get_absolute_url', '(', ')', ')', 'else', ':', 'formset', '=', 'PupsFormSet', '(', 'instance', '=', 'breeding', ',', 'queryset', '=', 'Animal', '.', 'objects', '.', 'filter', '(', 'Alive', '=', 'True', ',', 'Weaned__isnull', '=', 'True', ')', ')', 'return', 'render', '(', 'request', ',', '"breeding_wean.html"', ',', '{', '"formset"', ':', 'formset', ',', "'breeding'", ':', 'breeding', '}', ')']
This view is used to generate a form by which to wean pups which belong to a particular breeding set. This view typically is used to wean existing pups. This includes the MouseID, Cage, Markings, Gender and Wean Date fields. For other fields use the breeding-change page. It takes a request in the form /breeding/(breeding_id)/wean/ and returns a form specific to the breeding set defined in breeding_id. breeding_id is the background identification number of the breeding set and does not refer to the barcode of any breeding cage. This view returns a formset in which one row represents one animal. To add extra animals to a breeding set use /breeding/(breeding_id)/pups/. This view is restricted to those with the permission animal.change_animal.
['This', 'view', 'is', 'used', 'to', 'generate', 'a', 'form', 'by', 'which', 'to', 'wean', 'pups', 'which', 'belong', 'to', 'a', 'particular', 'breeding', 'set', '.', 'This', 'view', 'typically', 'is', 'used', 'to', 'wean', 'existing', 'pups', '.', 'This', 'includes', 'the', 'MouseID', 'Cage', 'Markings', 'Gender', 'and', 'Wean', 'Date', 'fields', '.', 'For', 'other', 'fields', 'use', 'the', 'breeding', '-', 'change', 'page', '.', 'It', 'takes', 'a', 'request', 'in', 'the', 'form', '/', 'breeding', '/', '(', 'breeding_id', ')', '/', 'wean', '/', 'and', 'returns', 'a', 'form', 'specific', 'to', 'the', 'breeding', 'set', 'defined', 'in', 'breeding_id', '.', 'breeding_id', 'is', 'the', 'background', 'identification', 'number', 'of', 'the', 'breeding', 'set', 'and', 'does', 'not', 'refer', 'to', 'the', 'barcode', 'of', 'any', 'breeding', 'cage', '.', 'This', 'view', 'returns', 'a', 'formset', 'in', 'which', 'one', 'row', 'represents', 'one', 'animal', '.', 'To', 'add', 'extra', 'animals', 'to', 'a', 'breeding', 'set', 'use', '/', 'breeding', '/', '(', 'breeding_id', ')', '/', 'pups', '/', '.', 'This', 'view', 'is', 'restricted', 'to', 'those', 'with', 'the', 'permission', 'animal', '.', 'change_animal', '.']
train
https://github.com/davebridges/mousedb/blob/2a33f6d15d88b1540b05f7232b154fdbf8568580/mousedb/animal/views.py#L395-L413
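A hypothetical URLconf wiring for this view, written in modern Django syntax; the route string follows the /breeding/(breeding_id)/wean/ form named in the docstring:

from django.urls import path
from mousedb.animal import views

urlpatterns = [
    path('breeding/<int:breeding_id>/wean/', views.breeding_wean,
         name='breeding-wean'),
]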
1,248
pycontribs/jira
jira/client.py
JIRA.remove_group
def remove_group(self, groupname): """Delete a group from the JIRA instance. :param groupname: The group to be deleted from the JIRA instance. :type groupname: str :return: Boolean. Returns True on success. :rtype: bool """ # implementation based on # https://docs.atlassian.com/jira/REST/ondemand/#d2e5173 url = self._options['server'] + '/rest/api/latest/group' x = {'groupname': groupname} self._session.delete(url, params=x) return True
python
def remove_group(self, groupname): """Delete a group from the JIRA instance. :param groupname: The group to be deleted from the JIRA instance. :type groupname: str :return: Boolean. Returns True on success. :rtype: bool """ # implementation based on # https://docs.atlassian.com/jira/REST/ondemand/#d2e5173 url = self._options['server'] + '/rest/api/latest/group' x = {'groupname': groupname} self._session.delete(url, params=x) return True
['def', 'remove_group', '(', 'self', ',', 'groupname', ')', ':', '# implementation based on', '# https://docs.atlassian.com/jira/REST/ondemand/#d2e5173', 'url', '=', 'self', '.', '_options', '[', "'server'", ']', '+', "'/rest/api/latest/group'", 'x', '=', '{', "'groupname'", ':', 'groupname', '}', 'self', '.', '_session', '.', 'delete', '(', 'url', ',', 'params', '=', 'x', ')', 'return', 'True']
Delete a group from the JIRA instance. :param groupname: The group to be deleted from the JIRA instance. :type groupname: str :return: Boolean. Returns True on success. :rtype: bool
['Delete', 'a', 'group', 'from', 'the', 'JIRA', 'instance', '.']
train
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L1207-L1220
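A hedged usage sketch; the server URL, credentials, and group name are placeholders:

from jira import JIRA

jira = JIRA(server='https://jira.example.com', basic_auth=('user', 'token'))
if jira.remove_group('old-contractors'):
    print('group deleted')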
1,249
fermiPy/fermipy
fermipy/gtanalysis.py
GTBinnedAnalysis.counts_map
def counts_map(self): """Return 3-D counts map for this component as a Map object. Returns ------- map : `~fermipy.skymap.MapBase` """ try: if isinstance(self.like, gtutils.SummedLikelihood): cmap = self.like.components[0].logLike.countsMap() p_method = cmap.projection().method() else: cmap = self.like.logLike.countsMap() p_method = cmap.projection().method() except Exception: p_method = 0 if p_method == 0: # WCS z = cmap.data() z = np.array(z).reshape(self.enumbins, self.npix, self.npix) return WcsNDMap(copy.deepcopy(self.geom), z) elif p_method == 1: # HPX z = cmap.data() z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix)) return HpxNDMap(copy.deepcopy(self.geom), z) else: self.logger.error('Did not recognize CountsMap type %i' % p_method, exc_info=True) return None
python
def counts_map(self): """Return 3-D counts map for this component as a Map object. Returns ------- map : `~fermipy.skymap.MapBase` """ try: if isinstance(self.like, gtutils.SummedLikelihood): cmap = self.like.components[0].logLike.countsMap() p_method = cmap.projection().method() else: cmap = self.like.logLike.countsMap() p_method = cmap.projection().method() except Exception: p_method = 0 if p_method == 0: # WCS z = cmap.data() z = np.array(z).reshape(self.enumbins, self.npix, self.npix) return WcsNDMap(copy.deepcopy(self.geom), z) elif p_method == 1: # HPX z = cmap.data() z = np.array(z).reshape(self.enumbins, np.max(self.geom.npix)) return HpxNDMap(copy.deepcopy(self.geom), z) else: self.logger.error('Did not recognize CountsMap type %i' % p_method, exc_info=True) return None
['def', 'counts_map', '(', 'self', ')', ':', 'try', ':', 'if', 'isinstance', '(', 'self', '.', 'like', ',', 'gtutils', '.', 'SummedLikelihood', ')', ':', 'cmap', '=', 'self', '.', 'like', '.', 'components', '[', '0', ']', '.', 'logLike', '.', 'countsMap', '(', ')', 'p_method', '=', 'cmap', '.', 'projection', '(', ')', '.', 'method', '(', ')', 'else', ':', 'cmap', '=', 'self', '.', 'like', '.', 'logLike', '.', 'countsMap', '(', ')', 'p_method', '=', 'cmap', '.', 'projection', '(', ')', '.', 'method', '(', ')', 'except', 'Exception', ':', 'p_method', '=', '0', 'if', 'p_method', '==', '0', ':', '# WCS', 'z', '=', 'cmap', '.', 'data', '(', ')', 'z', '=', 'np', '.', 'array', '(', 'z', ')', '.', 'reshape', '(', 'self', '.', 'enumbins', ',', 'self', '.', 'npix', ',', 'self', '.', 'npix', ')', 'return', 'WcsNDMap', '(', 'copy', '.', 'deepcopy', '(', 'self', '.', 'geom', ')', ',', 'z', ')', 'elif', 'p_method', '==', '1', ':', '# HPX', 'z', '=', 'cmap', '.', 'data', '(', ')', 'z', '=', 'np', '.', 'array', '(', 'z', ')', '.', 'reshape', '(', 'self', '.', 'enumbins', ',', 'np', '.', 'max', '(', 'self', '.', 'geom', '.', 'npix', ')', ')', 'return', 'HpxNDMap', '(', 'copy', '.', 'deepcopy', '(', 'self', '.', 'geom', ')', ',', 'z', ')', 'else', ':', 'self', '.', 'logger', '.', 'error', '(', "'Did not recognize CountsMap type %i'", '%', 'p_method', ',', 'exc_info', '=', 'True', ')', 'return', 'None']
Return 3-D counts map for this component as a Map object. Returns ------- map : `~fermipy.skymap.MapBase`
['Return', '3', '-', 'D', 'counts', 'map', 'for', 'this', 'component', 'as', 'a', 'Map', 'object', '.']
train
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/gtanalysis.py#L4787-L4816
1,250
maweigert/gputools
gputools/convolve/convolve.py
convolve
def convolve(data, h, res_g=None, sub_blocks=None): """ convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. """ if not len(data.shape) in [1, 2, 3]: raise ValueError("dim = %s not supported" % (len(data.shape))) if len(data.shape) != len(h.shape): raise ValueError("dimension of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape))) if isinstance(data, OCLArray) and isinstance(h, OCLArray): return _convolve_buf(data, h, res_g) elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray): if sub_blocks == (1,) * len(data.shape) or sub_blocks is None: return _convolve_np(data, h) else: # cut the image into tile and operate on every of them N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)] Npads = [int(s / 2) for s in h.shape] res = np.empty(data.shape, np.float32) for data_tile, data_s_src, data_s_dest \ in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"): res_tile = _convolve_np(data_tile.copy(), h) res[data_s_src] = res_tile[data_s_dest] return res else: raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
python
def convolve(data, h, res_g=None, sub_blocks=None): """ convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. """ if not len(data.shape) in [1, 2, 3]: raise ValueError("dim = %s not supported" % (len(data.shape))) if len(data.shape) != len(h.shape): raise ValueError("dimension of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape))) if isinstance(data, OCLArray) and isinstance(h, OCLArray): return _convolve_buf(data, h, res_g) elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray): if sub_blocks == (1,) * len(data.shape) or sub_blocks is None: return _convolve_np(data, h) else: # cut the image into tile and operate on every of them N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)] Npads = [int(s / 2) for s in h.shape] res = np.empty(data.shape, np.float32) for data_tile, data_s_src, data_s_dest \ in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"): res_tile = _convolve_np(data_tile.copy(), h) res[data_s_src] = res_tile[data_s_dest] return res else: raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
['def', 'convolve', '(', 'data', ',', 'h', ',', 'res_g', '=', 'None', ',', 'sub_blocks', '=', 'None', ')', ':', 'if', 'not', 'len', '(', 'data', '.', 'shape', ')', 'in', '[', '1', ',', '2', ',', '3', ']', ':', 'raise', 'ValueError', '(', '"dim = %s not supported"', '%', '(', 'len', '(', 'data', '.', 'shape', ')', ')', ')', 'if', 'len', '(', 'data', '.', 'shape', ')', '!=', 'len', '(', 'h', '.', 'shape', ')', ':', 'raise', 'ValueError', '(', '"dimension of data (%s) and h (%s) are different"', '%', '(', 'len', '(', 'data', '.', 'shape', ')', ',', 'len', '(', 'h', '.', 'shape', ')', ')', ')', 'if', 'isinstance', '(', 'data', ',', 'OCLArray', ')', 'and', 'isinstance', '(', 'h', ',', 'OCLArray', ')', ':', 'return', '_convolve_buf', '(', 'data', ',', 'h', ',', 'res_g', ')', 'elif', 'isinstance', '(', 'data', ',', 'np', '.', 'ndarray', ')', 'and', 'isinstance', '(', 'h', ',', 'np', '.', 'ndarray', ')', ':', 'if', 'sub_blocks', '==', '(', '1', ',', ')', '*', 'len', '(', 'data', '.', 'shape', ')', 'or', 'sub_blocks', 'is', 'None', ':', 'return', '_convolve_np', '(', 'data', ',', 'h', ')', 'else', ':', '# cut the image into tile and operate on every of them', 'N_sub', '=', '[', 'int', '(', 'np', '.', 'ceil', '(', '1.', '*', 'n', '/', 's', ')', ')', 'for', 'n', ',', 's', 'in', 'zip', '(', 'data', '.', 'shape', ',', 'sub_blocks', ')', ']', 'Npads', '=', '[', 'int', '(', 's', '/', '2', ')', 'for', 's', 'in', 'h', '.', 'shape', ']', 'res', '=', 'np', '.', 'empty', '(', 'data', '.', 'shape', ',', 'np', '.', 'float32', ')', 'for', 'data_tile', ',', 'data_s_src', ',', 'data_s_dest', 'in', 'tile_iterator', '(', 'data', ',', 'blocksize', '=', 'N_sub', ',', 'padsize', '=', 'Npads', ',', 'mode', '=', '"constant"', ')', ':', 'res_tile', '=', '_convolve_np', '(', 'data_tile', '.', 'copy', '(', ')', ',', 'h', ')', 'res', '[', 'data_s_src', ']', '=', 'res_tile', '[', 'data_s_dest', ']', 'return', 'res', 'else', ':', 'raise', 'TypeError', '(', '"unknown types (%s, %s)"', '%', '(', 'type', '(', 'data', ')', ',', 'type', '(', 'h', ')', ')', ')']
convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge.
['convolves', '1d', '-', '3d', 'data', 'with', 'kernel', 'h']
train
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/convolve.py#L18-L54
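A usage sketch with plain NumPy inputs (made-up data), assuming convolve is exported at package level; gputools also accepts OCLArray buffers, but the ndarray path is the simplest:

import numpy as np
from gputools import convolve

data = np.random.rand(64, 64).astype(np.float32)
h = np.ones((3, 3), np.float32) / 9.0             # box-blur kernel
res = convolve(data, h)                           # single pass
res_tiled = convolve(data, h, sub_blocks=(2, 2))  # same result, computed in tiles
print(np.allclose(res, res_tiled, atol=1e-4))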
1,251
Valassis-Digital-Media/spylon
spylon/spark/progress.py
start
def start(sc, timedelta_formatter=_pretty_time_delta, bar_width=20, sleep_time=0.5): """Creates a :class:`ProgressPrinter` that polls the SparkContext for information about active stage progress and prints that information to stderr. The printer runs in a thread and is useful for showing text-based progress bars in interactive environments (e.g., REPLs, Jupyter Notebooks). This function creates a singleton printer instance and returns that instance no matter what arguments are passed to this function again until :func:`stop` is called to shutdown the singleton. If you want more control over the printer lifecycle, create an instance of :class:`ProgressPrinter` directly and use its methods. Parameters ---------- sc: :class:`pyspark.context.SparkContext`, optional SparkContext to use to create a new thread timedelta_formatter : callable, optional Converts a timedelta to a string. bar_width : int, optional Width of the progressbar to print out. sleep_time : float, optional Frequency in seconds with which to poll Apache Spark for task stage information. Returns ------- :class:`ProgressPrinter` """ global _printer_singleton if _printer_singleton is None: _printer_singleton = ProgressPrinter(sc, timedelta_formatter, bar_width, sleep_time) _printer_singleton.start() return _printer_singleton
python
def start(sc, timedelta_formatter=_pretty_time_delta, bar_width=20, sleep_time=0.5): """Creates a :class:`ProgressPrinter` that polls the SparkContext for information about active stage progress and prints that information to stderr. The printer runs in a thread and is useful for showing text-based progress bars in interactive environments (e.g., REPLs, Jupyter Notebooks). This function creates a singleton printer instance and returns that instance no matter what arguments are passed to this function again until :func:`stop` is called to shutdown the singleton. If you want more control over the printer lifecycle, create an instance of :class:`ProgressPrinter` directly and use its methods. Parameters ---------- sc: :class:`pyspark.context.SparkContext`, optional SparkContext to use to create a new thread timedelta_formatter : callable, optional Converts a timedelta to a string. bar_width : int, optional Width of the progressbar to print out. sleep_time : float, optional Frequency in seconds with which to poll Apache Spark for task stage information. Returns ------- :class:`ProgressPrinter` """ global _printer_singleton if _printer_singleton is None: _printer_singleton = ProgressPrinter(sc, timedelta_formatter, bar_width, sleep_time) _printer_singleton.start() return _printer_singleton
['def', 'start', '(', 'sc', ',', 'timedelta_formatter', '=', '_pretty_time_delta', ',', 'bar_width', '=', '20', ',', 'sleep_time', '=', '0.5', ')', ':', 'global', '_printer_singleton', 'if', '_printer_singleton', 'is', 'None', ':', '_printer_singleton', '=', 'ProgressPrinter', '(', 'sc', ',', 'timedelta_formatter', ',', 'bar_width', ',', 'sleep_time', ')', '_printer_singleton', '.', 'start', '(', ')', 'return', '_printer_singleton']
Creates a :class:`ProgressPrinter` that polls the SparkContext for information about active stage progress and prints that information to stderr. The printer runs in a thread and is useful for showing text-based progress bars in interactive environments (e.g., REPLs, Jupyter Notebooks). This function creates a singleton printer instance and returns that instance no matter what arguments are passed to this function again until :func:`stop` is called to shutdown the singleton. If you want more control over the printer lifecycle, create an instance of :class:`ProgressPrinter` directly and use its methods. Parameters ---------- sc: :class:`pyspark.context.SparkContext`, optional SparkContext to use to create a new thread timedelta_formatter : callable, optional Converts a timedelta to a string. bar_width : int, optional Width of the progressbar to print out. sleep_time : float, optional Frequency in seconds with which to poll Apache Spark for task stage information. Returns ------- :class:`ProgressPrinter`
['Creates', 'a', ':', 'class', ':', 'ProgressPrinter', 'that', 'polls', 'the', 'SparkContext', 'for', 'information', 'about', 'active', 'stage', 'progress', 'and', 'prints', 'that', 'information', 'to', 'stderr', '.']
train
https://github.com/Valassis-Digital-Media/spylon/blob/ac00e285fa1c790674606b793819c3e5baee0d48/spylon/spark/progress.py#L205-L238
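A sketch of the intended call pattern, assuming a live SparkContext sc (for example inside a PySpark shell); stop is the shutdown counterpart named in the docstring:

import spylon.spark.progress as progress

printer = progress.start(sc)   # returns the singleton ProgressPrinter
# ... run Spark jobs; stage progress bars are written to stderr ...
progress.stop()                # shut the singleton down when finished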
1,252
riga/scinum
scinum.py
atan
def atan(x): """ atan(x) Trigonometric arc tan function. """ _math = infer_math(x) if _math is math: return _math.atan(x) else: return _math.arctan(x)
python
def atan(x): """ atan(x) Trigonometric arc tan function. """ _math = infer_math(x) if _math is math: return _math.atan(x) else: return _math.arctan(x)
['def', 'atan', '(', 'x', ')', ':', '_math', '=', 'infer_math', '(', 'x', ')', 'if', '_math', 'is', 'math', ':', 'return', '_math', '.', 'atan', '(', 'x', ')', 'else', ':', 'return', '_math', '.', 'arctan', '(', 'x', ')']
atan(x) Trigonometric arc tan function.
['atan', '(', 'x', ')', 'Trigonometric', 'arc', 'tan', 'function', '.']
train
https://github.com/riga/scinum/blob/55eb6d8aa77beacee5a07443392954b8a0aad8cb/scinum.py#L1188-L1196
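The dispatch in action, assuming atan is importable from the scinum module: plain floats route through math.atan, array-likes through numpy.arctan:

import numpy as np
from scinum import atan

print(atan(1.0))                   # math.atan    -> 0.7853981...
print(atan(np.array([0.0, 1.0])))  # numpy.arctan -> [0.  0.78539816]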
1,253
materialsproject/pymatgen
pymatgen/analysis/phase_diagram.py
PDPlotter._get_3d_plot
def _get_3d_plot(self, label_stable=True): """ Shows the plot using pylab. Usually I won"t do imports in methods, but since plotting is a fairly expensive library to load and not all machines have matplotlib installed, I have done it this way. """ import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 from matplotlib.font_manager import FontProperties fig = plt.figure() ax = p3.Axes3D(fig) font = FontProperties() font.set_weight("bold") font.set_size(20) (lines, labels, unstable) = self.pd_plot_data count = 1 newlabels = list() for x, y, z in lines: ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b", markerfacecolor="r", markersize=10) for coords in sorted(labels.keys()): entry = labels[coords] label = entry.name if label_stable: if len(entry.composition.elements) == 1: ax.text(coords[0], coords[1], coords[2], label) else: ax.text(coords[0], coords[1], coords[2], str(count)) newlabels.append("{} : {}".format(count, latexify(label))) count += 1 plt.figtext(0.01, 0.01, "\n".join(newlabels)) ax.axis("off") return plt
python
def _get_3d_plot(self, label_stable=True): """ Shows the plot using pylab. Usually I won"t do imports in methods, but since plotting is a fairly expensive library to load and not all machines have matplotlib installed, I have done it this way. """ import matplotlib.pyplot as plt import mpl_toolkits.mplot3d.axes3d as p3 from matplotlib.font_manager import FontProperties fig = plt.figure() ax = p3.Axes3D(fig) font = FontProperties() font.set_weight("bold") font.set_size(20) (lines, labels, unstable) = self.pd_plot_data count = 1 newlabels = list() for x, y, z in lines: ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b", markerfacecolor="r", markersize=10) for coords in sorted(labels.keys()): entry = labels[coords] label = entry.name if label_stable: if len(entry.composition.elements) == 1: ax.text(coords[0], coords[1], coords[2], label) else: ax.text(coords[0], coords[1], coords[2], str(count)) newlabels.append("{} : {}".format(count, latexify(label))) count += 1 plt.figtext(0.01, 0.01, "\n".join(newlabels)) ax.axis("off") return plt
['def', '_get_3d_plot', '(', 'self', ',', 'label_stable', '=', 'True', ')', ':', 'import', 'matplotlib', '.', 'pyplot', 'as', 'plt', 'import', 'mpl_toolkits', '.', 'mplot3d', '.', 'axes3d', 'as', 'p3', 'from', 'matplotlib', '.', 'font_manager', 'import', 'FontProperties', 'fig', '=', 'plt', '.', 'figure', '(', ')', 'ax', '=', 'p3', '.', 'Axes3D', '(', 'fig', ')', 'font', '=', 'FontProperties', '(', ')', 'font', '.', 'set_weight', '(', '"bold"', ')', 'font', '.', 'set_size', '(', '20', ')', '(', 'lines', ',', 'labels', ',', 'unstable', ')', '=', 'self', '.', 'pd_plot_data', 'count', '=', '1', 'newlabels', '=', 'list', '(', ')', 'for', 'x', ',', 'y', ',', 'z', 'in', 'lines', ':', 'ax', '.', 'plot', '(', 'x', ',', 'y', ',', 'z', ',', '"bo-"', ',', 'linewidth', '=', '3', ',', 'markeredgecolor', '=', '"b"', ',', 'markerfacecolor', '=', '"r"', ',', 'markersize', '=', '10', ')', 'for', 'coords', 'in', 'sorted', '(', 'labels', '.', 'keys', '(', ')', ')', ':', 'entry', '=', 'labels', '[', 'coords', ']', 'label', '=', 'entry', '.', 'name', 'if', 'label_stable', ':', 'if', 'len', '(', 'entry', '.', 'composition', '.', 'elements', ')', '==', '1', ':', 'ax', '.', 'text', '(', 'coords', '[', '0', ']', ',', 'coords', '[', '1', ']', ',', 'coords', '[', '2', ']', ',', 'label', ')', 'else', ':', 'ax', '.', 'text', '(', 'coords', '[', '0', ']', ',', 'coords', '[', '1', ']', ',', 'coords', '[', '2', ']', ',', 'str', '(', 'count', ')', ')', 'newlabels', '.', 'append', '(', '"{} : {}"', '.', 'format', '(', 'count', ',', 'latexify', '(', 'label', ')', ')', ')', 'count', '+=', '1', 'plt', '.', 'figtext', '(', '0.01', ',', '0.01', ',', '"\\n"', '.', 'join', '(', 'newlabels', ')', ')', 'ax', '.', 'axis', '(', '"off"', ')', 'return', 'plt']
Shows the plot using pylab. Usually I won"t do imports in methods, but since plotting is a fairly expensive library to load and not all machines have matplotlib installed, I have done it this way.
['Shows', 'the', 'plot', 'using', 'pylab', '.', 'Usually', 'I', 'won', 't', 'do', 'imports', 'in', 'methods', 'but', 'since', 'plotting', 'is', 'a', 'fairly', 'expensive', 'library', 'to', 'load', 'and', 'not', 'all', 'machines', 'have', 'matplotlib', 'installed', 'I', 'have', 'done', 'it', 'this', 'way', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/phase_diagram.py#L1638-L1670
1,254
yougov/solr-doc-manager
mongo_connector/doc_managers/solr_doc_manager.py
DocManager.upsert
def upsert(self, doc, namespace, timestamp): """Update or insert a document into Solr This method should call whatever add/insert/update method exists for the backend engine and add the document in there. The input will always be one mongo document, represented as a Python dictionary. """ if self.auto_commit_interval is not None: self.solr.add([self._clean_doc(doc, namespace, timestamp)], commit=(self.auto_commit_interval == 0), commitWithin=u(self.auto_commit_interval)) else: self.solr.add([self._clean_doc(doc, namespace, timestamp)], commit=False)
python
def upsert(self, doc, namespace, timestamp): """Update or insert a document into Solr This method should call whatever add/insert/update method exists for the backend engine and add the document in there. The input will always be one mongo document, represented as a Python dictionary. """ if self.auto_commit_interval is not None: self.solr.add([self._clean_doc(doc, namespace, timestamp)], commit=(self.auto_commit_interval == 0), commitWithin=u(self.auto_commit_interval)) else: self.solr.add([self._clean_doc(doc, namespace, timestamp)], commit=False)
['def', 'upsert', '(', 'self', ',', 'doc', ',', 'namespace', ',', 'timestamp', ')', ':', 'if', 'self', '.', 'auto_commit_interval', 'is', 'not', 'None', ':', 'self', '.', 'solr', '.', 'add', '(', '[', 'self', '.', '_clean_doc', '(', 'doc', ',', 'namespace', ',', 'timestamp', ')', ']', ',', 'commit', '=', '(', 'self', '.', 'auto_commit_interval', '==', '0', ')', ',', 'commitWithin', '=', 'u', '(', 'self', '.', 'auto_commit_interval', ')', ')', 'else', ':', 'self', '.', 'solr', '.', 'add', '(', '[', 'self', '.', '_clean_doc', '(', 'doc', ',', 'namespace', ',', 'timestamp', ')', ']', ',', 'commit', '=', 'False', ')']
Update or insert a document into Solr This method should call whatever add/insert/update method exists for the backend engine and add the document in there. The input will always be one mongo document, represented as a Python dictionary.
['Update', 'or', 'insert', 'a', 'document', 'into', 'Solr']
train
https://github.com/yougov/solr-doc-manager/blob/1978bf6f3387b1afd6dd6b41a1bbaea9932d60fd/mongo_connector/doc_managers/solr_doc_manager.py#L261-L274
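A hedged usage sketch; the Solr URL is a placeholder and the constructor arguments assume the usual mongo-connector DocManager signature (target URL plus keyword options):

from mongo_connector.doc_managers.solr_doc_manager import DocManager

dm = DocManager('http://localhost:8983/solr', auto_commit_interval=0)
# one mongo document, its namespace, and an oplog timestamp
dm.upsert({'_id': 1, 'title': 'hello'}, 'db.collection', 1234567890)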
1,255
ask/redish
redish/types.py
ZSet.increment
def increment(self, member, amount=1): """Increment the score of ``member`` by ``amount``.""" self._dict[member] += amount return self._dict[member]
python
def increment(self, member, amount=1): """Increment the score of ``member`` by ``amount``.""" self._dict[member] += amount return self._dict[member]
['def', 'increment', '(', 'self', ',', 'member', ',', 'amount', '=', '1', ')', ':', 'self', '.', '_dict', '[', 'member', ']', '+=', 'amount', 'return', 'self', '.', '_dict', '[', 'member', ']']
Increment the score of ``member`` by ``amount``.
['Increment', 'the', 'score', 'of', 'member', 'by', 'amount', '.']
train
https://github.com/ask/redish/blob/4845f8d5e12fd953ecad624b4e1e89f79a082a3e/redish/types.py#L771-L774
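A self-contained behavior sketch of the dict-backed scores the class wraps; the minimal ZSet here (with a .get default for new members) is invented for illustration:

class ZSet:
    def __init__(self):
        self._dict = {}

    def increment(self, member, amount=1):
        self._dict[member] = self._dict.get(member, 0) + amount
        return self._dict[member]

z = ZSet()
z.increment('alice')            # score becomes 1
print(z.increment('alice', 4))  # 5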
1,256
mongodb/mongo-python-driver
bson/__init__.py
_bson_to_dict
def _bson_to_dict(data, opts): """Decode a BSON string to document_class.""" try: if _raw_document_class(opts.document_class): return opts.document_class(data, opts) _, end = _get_object_size(data, 0, len(data)) return _elements_to_dict(data, 4, end, opts) except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() reraise(InvalidBSON, exc_value, exc_tb)
python
def _bson_to_dict(data, opts): """Decode a BSON string to document_class.""" try: if _raw_document_class(opts.document_class): return opts.document_class(data, opts) _, end = _get_object_size(data, 0, len(data)) return _elements_to_dict(data, 4, end, opts) except InvalidBSON: raise except Exception: # Change exception type to InvalidBSON but preserve traceback. _, exc_value, exc_tb = sys.exc_info() reraise(InvalidBSON, exc_value, exc_tb)
['def', '_bson_to_dict', '(', 'data', ',', 'opts', ')', ':', 'try', ':', 'if', '_raw_document_class', '(', 'opts', '.', 'document_class', ')', ':', 'return', 'opts', '.', 'document_class', '(', 'data', ',', 'opts', ')', '_', ',', 'end', '=', '_get_object_size', '(', 'data', ',', '0', ',', 'len', '(', 'data', ')', ')', 'return', '_elements_to_dict', '(', 'data', ',', '4', ',', 'end', ',', 'opts', ')', 'except', 'InvalidBSON', ':', 'raise', 'except', 'Exception', ':', '# Change exception type to InvalidBSON but preserve traceback.', '_', ',', 'exc_value', ',', 'exc_tb', '=', 'sys', '.', 'exc_info', '(', ')', 'reraise', '(', 'InvalidBSON', ',', 'exc_value', ',', 'exc_tb', ')']
Decode a BSON string to document_class.
['Decode', 'a', 'BSON', 'string', 'to', 'document_class', '.']
train
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/bson/__init__.py#L434-L446
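A minimal sketch of the wrap-and-reraise pattern used above, in plain Python 3 (with_traceback instead of the six-style reraise helper): any parsing error is converted to InvalidBSON while the original traceback is preserved:

import sys

class InvalidBSON(Exception):
    pass

def parse(data):
    try:
        return data.decode('utf-8')   # stand-in for the real decoding work
    except Exception:
        _, exc_value, exc_tb = sys.exc_info()
        raise InvalidBSON(str(exc_value)).with_traceback(exc_tb)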
1,257
getpelican/pelican-plugins
permalinks/permalinks.py
get_permalink_ids_iter
def get_permalink_ids_iter(self): ''' Method to get permalink ids from content. To be bound to the class last thing. ''' permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY'] permalink_ids = self.metadata.get(permalink_id_key, '') for permalink_id in permalink_ids.split(','): if permalink_id: yield permalink_id.strip()
python
def get_permalink_ids_iter(self): ''' Method to get permalink ids from content. To be bound to the class last thing. ''' permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY'] permalink_ids = self.metadata.get(permalink_id_key, '') for permalink_id in permalink_ids.split(','): if permalink_id: yield permalink_id.strip()
['def', 'get_permalink_ids_iter', '(', 'self', ')', ':', 'permalink_id_key', '=', 'self', '.', 'settings', '[', "'PERMALINK_ID_METADATA_KEY'", ']', 'permalink_ids', '=', 'self', '.', 'metadata', '.', 'get', '(', 'permalink_id_key', ',', "''", ')', 'for', 'permalink_id', 'in', 'permalink_ids', '.', 'split', '(', "','", ')', ':', 'if', 'permalink_id', ':', 'yield', 'permalink_id', '.', 'strip', '(', ')']
Method to get permalink ids from content. To be bound to the class last thing.
['Method', 'to', 'get', 'permalink', 'ids', 'from', 'content', '.', 'To', 'be', 'bound', 'to', 'the', 'class', 'last', 'thing', '.']
train
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/permalinks/permalinks.py#L82-L92
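A self-contained behavior sketch; the settings key value and the metadata below are invented for illustration:

class Content:
    settings = {'PERMALINK_ID_METADATA_KEY': 'permalink_id'}
    metadata = {'permalink_id': 'abc123, def456'}

    def get_permalink_ids_iter(self):
        key = self.settings['PERMALINK_ID_METADATA_KEY']
        for pid in self.metadata.get(key, '').split(','):
            if pid:
                yield pid.strip()

print(list(Content().get_permalink_ids_iter()))  # ['abc123', 'def456']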
1,258
vtkiorg/vtki
vtki/pointset.py
UnstructuredGrid.linear_copy
def linear_copy(self, deep=False): """ Returns a copy of the input unstructured grid containing only linear cells. Converts the following cell types to their linear equivalents. - VTK_QUADRATIC_TETRA --> VTK_TETRA - VTK_QUADRATIC_PYRAMID --> VTK_PYRAMID - VTK_QUADRATIC_WEDGE --> VTK_WEDGE - VTK_QUADRATIC_HEXAHEDRON --> VTK_HEXAHEDRON Parameters ---------- deep : bool When True, makes a copy of the points array. Default False. Cells and cell types are always copied. Returns ------- grid : vtki.UnstructuredGrid UnstructuredGrid containing only linear cells. """ lgrid = self.copy(deep) # grab the vtk object vtk_cell_type = numpy_to_vtk(self.GetCellTypesArray(), deep=True) celltype = vtk_to_numpy(vtk_cell_type) celltype[celltype == VTK_QUADRATIC_TETRA] = VTK_TETRA celltype[celltype == VTK_QUADRATIC_PYRAMID] = VTK_PYRAMID celltype[celltype == VTK_QUADRATIC_WEDGE] = VTK_WEDGE celltype[celltype == VTK_QUADRATIC_HEXAHEDRON] = VTK_HEXAHEDRON # track quad mask for later quad_quad_mask = celltype == VTK_QUADRATIC_QUAD celltype[quad_quad_mask] = VTK_QUAD quad_tri_mask = celltype == VTK_QUADRATIC_TRIANGLE celltype[quad_tri_mask] = VTK_TRIANGLE vtk_offset = self.GetCellLocationsArray() cells = vtk.vtkCellArray() cells.DeepCopy(self.GetCells()) lgrid.SetCells(vtk_cell_type, vtk_offset, cells) # fixing bug with display of quad cells if np.any(quad_quad_mask): quad_offset = lgrid.offset[quad_quad_mask] base_point = lgrid.cells[quad_offset + 1] lgrid.cells[quad_offset + 5] = base_point lgrid.cells[quad_offset + 6] = base_point lgrid.cells[quad_offset + 7] = base_point lgrid.cells[quad_offset + 8] = base_point if np.any(quad_tri_mask): tri_offset = lgrid.offset[quad_tri_mask] base_point = lgrid.cells[tri_offset + 1] lgrid.cells[tri_offset + 4] = base_point lgrid.cells[tri_offset + 5] = base_point lgrid.cells[tri_offset + 6] = base_point return lgrid
python
def linear_copy(self, deep=False): """ Returns a copy of the input unstructured grid containing only linear cells. Converts the following cell types to their linear equivalents. - VTK_QUADRATIC_TETRA --> VTK_TETRA - VTK_QUADRATIC_PYRAMID --> VTK_PYRAMID - VTK_QUADRATIC_WEDGE --> VTK_WEDGE - VTK_QUADRATIC_HEXAHEDRON --> VTK_HEXAHEDRON Parameters ---------- deep : bool When True, makes a copy of the points array. Default False. Cells and cell types are always copied. Returns ------- grid : vtki.UnstructuredGrid UnstructuredGrid containing only linear cells. """ lgrid = self.copy(deep) # grab the vtk object vtk_cell_type = numpy_to_vtk(self.GetCellTypesArray(), deep=True) celltype = vtk_to_numpy(vtk_cell_type) celltype[celltype == VTK_QUADRATIC_TETRA] = VTK_TETRA celltype[celltype == VTK_QUADRATIC_PYRAMID] = VTK_PYRAMID celltype[celltype == VTK_QUADRATIC_WEDGE] = VTK_WEDGE celltype[celltype == VTK_QUADRATIC_HEXAHEDRON] = VTK_HEXAHEDRON # track quad mask for later quad_quad_mask = celltype == VTK_QUADRATIC_QUAD celltype[quad_quad_mask] = VTK_QUAD quad_tri_mask = celltype == VTK_QUADRATIC_TRIANGLE celltype[quad_tri_mask] = VTK_TRIANGLE vtk_offset = self.GetCellLocationsArray() cells = vtk.vtkCellArray() cells.DeepCopy(self.GetCells()) lgrid.SetCells(vtk_cell_type, vtk_offset, cells) # fixing bug with display of quad cells if np.any(quad_quad_mask): quad_offset = lgrid.offset[quad_quad_mask] base_point = lgrid.cells[quad_offset + 1] lgrid.cells[quad_offset + 5] = base_point lgrid.cells[quad_offset + 6] = base_point lgrid.cells[quad_offset + 7] = base_point lgrid.cells[quad_offset + 8] = base_point if np.any(quad_tri_mask): tri_offset = lgrid.offset[quad_tri_mask] base_point = lgrid.cells[tri_offset + 1] lgrid.cells[tri_offset + 4] = base_point lgrid.cells[tri_offset + 5] = base_point lgrid.cells[tri_offset + 6] = base_point return lgrid
['def', 'linear_copy', '(', 'self', ',', 'deep', '=', 'False', ')', ':', 'lgrid', '=', 'self', '.', 'copy', '(', 'deep', ')', '# grab the vtk object', 'vtk_cell_type', '=', 'numpy_to_vtk', '(', 'self', '.', 'GetCellTypesArray', '(', ')', ',', 'deep', '=', 'True', ')', 'celltype', '=', 'vtk_to_numpy', '(', 'vtk_cell_type', ')', 'celltype', '[', 'celltype', '==', 'VTK_QUADRATIC_TETRA', ']', '=', 'VTK_TETRA', 'celltype', '[', 'celltype', '==', 'VTK_QUADRATIC_PYRAMID', ']', '=', 'VTK_PYRAMID', 'celltype', '[', 'celltype', '==', 'VTK_QUADRATIC_WEDGE', ']', '=', 'VTK_WEDGE', 'celltype', '[', 'celltype', '==', 'VTK_QUADRATIC_HEXAHEDRON', ']', '=', 'VTK_HEXAHEDRON', '# track quad mask for later', 'quad_quad_mask', '=', 'celltype', '==', 'VTK_QUADRATIC_QUAD', 'celltype', '[', 'quad_quad_mask', ']', '=', 'VTK_QUAD', 'quad_tri_mask', '=', 'celltype', '==', 'VTK_QUADRATIC_TRIANGLE', 'celltype', '[', 'quad_tri_mask', ']', '=', 'VTK_TRIANGLE', 'vtk_offset', '=', 'self', '.', 'GetCellLocationsArray', '(', ')', 'cells', '=', 'vtk', '.', 'vtkCellArray', '(', ')', 'cells', '.', 'DeepCopy', '(', 'self', '.', 'GetCells', '(', ')', ')', 'lgrid', '.', 'SetCells', '(', 'vtk_cell_type', ',', 'vtk_offset', ',', 'cells', ')', '# fixing bug with display of quad cells', 'if', 'np', '.', 'any', '(', 'quad_quad_mask', ')', ':', 'quad_offset', '=', 'lgrid', '.', 'offset', '[', 'quad_quad_mask', ']', 'base_point', '=', 'lgrid', '.', 'cells', '[', 'quad_offset', '+', '1', ']', 'lgrid', '.', 'cells', '[', 'quad_offset', '+', '5', ']', '=', 'base_point', 'lgrid', '.', 'cells', '[', 'quad_offset', '+', '6', ']', '=', 'base_point', 'lgrid', '.', 'cells', '[', 'quad_offset', '+', '7', ']', '=', 'base_point', 'lgrid', '.', 'cells', '[', 'quad_offset', '+', '8', ']', '=', 'base_point', 'if', 'np', '.', 'any', '(', 'quad_tri_mask', ')', ':', 'tri_offset', '=', 'lgrid', '.', 'offset', '[', 'quad_tri_mask', ']', 'base_point', '=', 'lgrid', '.', 'cells', '[', 'tri_offset', '+', '1', ']', 'lgrid', '.', 'cells', '[', 'tri_offset', '+', '4', ']', '=', 'base_point', 'lgrid', '.', 'cells', '[', 'tri_offset', '+', '5', ']', '=', 'base_point', 'lgrid', '.', 'cells', '[', 'tri_offset', '+', '6', ']', '=', 'base_point', 'return', 'lgrid']
Returns a copy of the input unstructured grid containing only linear cells. Converts the following cell types to their linear equivalents. - VTK_QUADRATIC_TETRA --> VTK_TETRA - VTK_QUADRATIC_PYRAMID --> VTK_PYRAMID - VTK_QUADRATIC_WEDGE --> VTK_WEDGE - VTK_QUADRATIC_HEXAHEDRON --> VTK_HEXAHEDRON Parameters ---------- deep : bool When True, makes a copy of the points array. Default False. Cells and cell types are always copied. Returns ------- grid : vtki.UnstructuredGrid UnstructuredGrid containing only linear cells.
['Returns', 'a', 'copy', 'of', 'the', 'input', 'unstructured', 'grid', 'containing', 'only', 'linear', 'cells', '.', 'Converts', 'the', 'following', 'cell', 'types', 'to', 'their', 'linear', 'equivalents', '.']
train
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/pointset.py#L2015-L2075
1,259
pypa/pipenv
pipenv/patched/notpip/_vendor/pkg_resources/__init__.py
WorkingSet._build_master
def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws
python
def _build_master(cls): """ Prepare the master working set. """ ws = cls() try: from __main__ import __requires__ except ImportError: # The main program does not list any requirements return ws # ensure the requirements are met try: ws.require(__requires__) except VersionConflict: return cls._build_from_requirements(__requires__) return ws
['def', '_build_master', '(', 'cls', ')', ':', 'ws', '=', 'cls', '(', ')', 'try', ':', 'from', '__main__', 'import', '__requires__', 'except', 'ImportError', ':', '# The main program does not list any requirements', 'return', 'ws', '# ensure the requirements are met', 'try', ':', 'ws', '.', 'require', '(', '__requires__', ')', 'except', 'VersionConflict', ':', 'return', 'cls', '.', '_build_from_requirements', '(', '__requires__', ')', 'return', 'ws']
Prepare the master working set.
['Prepare', 'the', 'master', 'working', 'set', '.']
train
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/pkg_resources/__init__.py#L568-L585
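A sketch of the hook this method consumes: a script that defines __requires__ in __main__ before importing pkg_resources has the master working set validated (or rebuilt) against those pins. The 'requests>=2.0' pin is an arbitrary example and must be satisfiable for the import to succeed:

__requires__ = ['requests>=2.0']   # read by WorkingSet._build_master

import pkg_resources
print(pkg_resources.require('requests')[0])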
1,260
mitsei/dlkit
dlkit/json_/learning/queries.py
ProficiencyQuery.match_objective_id
def match_objective_id(self, objective_id, match): """Sets the objective ``Id`` for this query. arg: objective_id (osid.id.Id): an objective ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``objective_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if not isinstance(objective_id, Id): raise errors.InvalidArgument() self._add_match('objectiveId', str(objective_id), match)
python
def match_objective_id(self, objective_id, match): """Sets the objective ``Id`` for this query. arg: objective_id (osid.id.Id): an objective ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``objective_id`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ if not isinstance(objective_id, Id): raise errors.InvalidArgument() self._add_match('objectiveId', str(objective_id), match)
['def', 'match_objective_id', '(', 'self', ',', 'objective_id', ',', 'match', ')', ':', 'if', 'not', 'isinstance', '(', 'objective_id', ',', 'Id', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', ')', 'self', '.', '_add_match', '(', "'objectiveId'", ',', 'str', '(', 'objective_id', ')', ',', 'match', ')']
Sets the objective ``Id`` for this query. arg: objective_id (osid.id.Id): an objective ``Id`` arg: match (boolean): ``true`` for a positive match, ``false`` for a negative match raise: NullArgument - ``objective_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
['Sets', 'the', 'objective', 'Id', 'for', 'this', 'query', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/queries.py#L1240-L1252
1,261
kubernetes-client/python
kubernetes/client/apis/apps_v1_api.py
AppsV1Api.replace_namespaced_replica_set_scale
def replace_namespaced_replica_set_scale(self, name, namespace, body, **kwargs): """ replace scale of the specified ReplicaSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs) return data
python
def replace_namespaced_replica_set_scale(self, name, namespace, body, **kwargs): """ replace scale of the specified ReplicaSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1Scale If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs) else: (data) = self.replace_namespaced_replica_set_scale_with_http_info(name, namespace, body, **kwargs) return data
['def', 'replace_namespaced_replica_set_scale', '(', 'self', ',', 'name', ',', 'namespace', ',', 'body', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'replace_namespaced_replica_set_scale_with_http_info', '(', 'name', ',', 'namespace', ',', 'body', ',', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'replace_namespaced_replica_set_scale_with_http_info', '(', 'name', ',', 'namespace', ',', 'body', ',', '*', '*', 'kwargs', ')', 'return', 'data']
replace scale of the specified ReplicaSet This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_replica_set_scale(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the Scale (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V1Scale body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. :return: V1Scale If the method is called asynchronously, returns the request thread.
['replace', 'scale', 'of', 'the', 'specified', 'ReplicaSet', 'This', 'method', 'makes', 'a', 'synchronous', 'HTTP', 'request', 'by', 'default', '.', 'To', 'make', 'an', 'asynchronous', 'HTTP', 'request', 'please', 'pass', 'async_req', '=', 'True', '>>>', 'thread', '=', 'api', '.', 'replace_namespaced_replica_set_scale', '(', 'name', 'namespace', 'body', 'async_req', '=', 'True', ')', '>>>', 'result', '=', 'thread', '.', 'get', '()']
train
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/apps_v1_api.py#L6911-L6935
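A minimal usage sketch for the scale-replacement call above, using the official kubernetes Python client; it assumes a reachable cluster, a local kubeconfig, and a ReplicaSet named 'my-replicaset' in 'default' (placeholders, not from the source).

from kubernetes import client, config

config.load_kube_config()  # assumes credentials in ~/.kube/config
api = client.AppsV1Api()
scale = client.V1Scale(
    metadata=client.V1ObjectMeta(name='my-replicaset', namespace='default'),
    spec=client.V1ScaleSpec(replicas=3),
)
# synchronous by default; pass async_req=True to get a thread back instead
result = api.replace_namespaced_replica_set_scale('my-replicaset', 'default', scale)
print(result.spec.replicas)  # -> 3 once the API server accepts the new scale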
1,262
firstprayer/monsql
monsql/db.py
Database.drop_table
def drop_table(self, tablename, silent=False): """ Drop a table :Parameters: - tablename: string - silent: boolean. If false and the table doesn't exist, an exception will be raised; otherwise it will be ignored :Return: Nothing """ if not silent and not self.is_table_existed(tablename): raise MonSQLException('TABLE %s DOES NOT EXIST' %tablename) self.__cursor.execute('DROP TABLE IF EXISTS %s' %(tablename)) self.__db.commit()
python
def drop_table(self, tablename, silent=False): """ Drop a table :Parameters: - tablename: string - silent: boolean. If false and the table doesn't exist, an exception will be raised; otherwise it will be ignored :Return: Nothing """ if not silent and not self.is_table_existed(tablename): raise MonSQLException('TABLE %s DOES NOT EXIST' %tablename) self.__cursor.execute('DROP TABLE IF EXISTS %s' %(tablename)) self.__db.commit()
['def', 'drop_table', '(', 'self', ',', 'tablename', ',', 'silent', '=', 'False', ')', ':', 'if', 'not', 'silent', 'and', 'not', 'self', '.', 'is_table_existed', '(', 'tablename', ')', ':', 'raise', 'MonSQLException', '(', "'TABLE %s DOES NOT EXIST'", '%', 'tablename', ')', 'self', '.', '__cursor', '.', 'execute', '(', "'DROP TABLE IF EXISTS %s'", '%', '(', 'tablename', ')', ')', 'self', '.', '__db', '.', 'commit', '(', ')']
Drop a table :Parameters: - tablename: string - silent: boolean. If false and the table doesn't exist, an exception will be raised; otherwise it will be ignored :Return: Nothing
['Drop', 'a', 'table']
train
https://github.com/firstprayer/monsql/blob/6285c15b574c8664046eae2edfeb548c7b173efd/monsql/db.py#L138-L155
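The same guard-then-'DROP TABLE IF EXISTS' pattern, sketched self-contained over sqlite3 since MonSQL's connection setup is not shown in the record; the table names are illustrative.

import sqlite3

def drop_table(conn, tablename, silent=False):
    cur = conn.cursor()
    # existence check plays the role of is_table_existed()
    cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?",
                (tablename,))
    if not silent and cur.fetchone() is None:
        raise RuntimeError('TABLE %s DOES NOT EXIST' % tablename)
    # identifiers cannot be bound as parameters, hence string interpolation
    cur.execute('DROP TABLE IF EXISTS "%s"' % tablename)
    conn.commit()

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE temp_results (id INTEGER)')
drop_table(conn, 'temp_results')               # succeeds
drop_table(conn, 'temp_results', silent=True)  # no error on the second drop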
1,263
saxix/django-concurrency
src/concurrency/utils.py
get_classname
def get_classname(o): """ Returns the classname of an object or a class :param o: an object or a class :return: the class name """ if inspect.isclass(o): target = o elif callable(o): target = o else: target = o.__class__ try: return target.__qualname__ except AttributeError: # pragma: no cover return target.__name__
python
def get_classname(o): """ Returns the classname of an object or a class :param o: an object or a class :return: the class name """ if inspect.isclass(o): target = o elif callable(o): target = o else: target = o.__class__ try: return target.__qualname__ except AttributeError: # pragma: no cover return target.__name__
['def', 'get_classname', '(', 'o', ')', ':', 'if', 'inspect', '.', 'isclass', '(', 'o', ')', ':', 'target', '=', 'o', 'elif', 'callable', '(', 'o', ')', ':', 'target', '=', 'o', 'else', ':', 'target', '=', 'o', '.', '__class__', 'try', ':', 'return', 'target', '.', '__qualname__', 'except', 'AttributeError', ':', '# pragma: no cover', 'return', 'target', '.', '__name__']
Returns the classname of an object or a class :param o: an object or a class :return: the class name
['Returns', 'the', 'classname', 'of', 'an', 'object', 'or', 'a', 'class']
train
https://github.com/saxix/django-concurrency/blob/9a289dc007b1cdf609b7dfb77a6d2868abc8097f/src/concurrency/utils.py#L121-L136
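A quick behavior check for the dispatch above (the import path is taken from the record itself).

from concurrency.utils import get_classname

class Invoice:
    pass

print(get_classname(Invoice))    # -> 'Invoice' (a class takes the isclass branch)
print(get_classname(Invoice()))  # -> 'Invoice' (an instance falls back to o.__class__)
print(get_classname(len))        # -> 'len'     (a plain callable; __qualname__ is used)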
1,264
briney/abutils
abutils/utils/phylogeny.py
igphyml
def igphyml(input_file=None, tree_file=None, root=None, verbose=False): ''' Computes a phylogenetic tree using IgPhyML. .. note:: IgPhyML must be installed. It can be downloaded from https://github.com/kbhoehn/IgPhyML. Args: input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required. tree_file (str): Path to the output tree file. root (str): Name of the root sequence. Required. verbose (bool): If `True`, prints the standard output and standard error for each IgPhyML run. Default is `False`. ''' if shutil.which('igphyml') is None: raise RuntimeError('It appears that IgPhyML is not installed.\nPlease install and try again.') # first, tree topology is estimated with the M0/GY94 model igphyml_cmd1 = 'igphyml -i {} -m GY -w M0 -t e --run_id gy94'.format(input_file) p1 = sp.Popen(igphyml_cmd1, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout1, stderr1 = p1.communicate() if verbose: print(stdout1 + '\n') print(stderr1 + '\n\n') intermediate = input_file + '_igphyml_tree.txt_gy94' # now we fit the HLP17 model once the tree topology is fixed igphyml_cmd2 = 'igphyml -i {0} -m HLP17 --root {1} -o lr -u {2} -o {3}'.format(input_file, root, intermediate, tree_file) p2 = sp.Popen(igphyml_cmd2, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout2, stderr2 = p2.communicate() if verbose: print(stdout2 + '\n') print(stderr2 + '\n') return tree_file + '_igphyml_tree.txt'
python
def igphyml(input_file=None, tree_file=None, root=None, verbose=False): ''' Computes a phylogenetic tree using IgPhyML. .. note:: IgPhyML must be installed. It can be downloaded from https://github.com/kbhoehn/IgPhyML. Args: input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required. tree_file (str): Path to the output tree file. root (str): Name of the root sequence. Required. verbose (bool): If `True`, prints the standard output and standard error for each IgPhyML run. Default is `False`. ''' if shutil.which('igphyml') is None: raise RuntimeError('It appears that IgPhyML is not installed.\nPlease install and try again.') # first, tree topology is estimated with the M0/GY94 model igphyml_cmd1 = 'igphyml -i {} -m GY -w M0 -t e --run_id gy94'.format(input_file) p1 = sp.Popen(igphyml_cmd1, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout1, stderr1 = p1.communicate() if verbose: print(stdout1 + '\n') print(stderr1 + '\n\n') intermediate = input_file + '_igphyml_tree.txt_gy94' # now we fit the HLP17 model once the tree topology is fixed igphyml_cmd2 = 'igphyml -i {0} -m HLP17 --root {1} -o lr -u {2} -o {3}'.format(input_file, root, intermediate, tree_file) p2 = sp.Popen(igphyml_cmd2, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) stdout2, stderr2 = p2.communicate() if verbose: print(stdout2 + '\n') print(stderr2 + '\n') return tree_file + '_igphyml_tree.txt'
['def', 'igphyml', '(', 'input_file', '=', 'None', ',', 'tree_file', '=', 'None', ',', 'root', '=', 'None', ',', 'verbose', '=', 'False', ')', ':', 'if', 'shutil', '.', 'which', '(', "'igphyml'", ')', 'is', 'None', ':', 'raise', 'RuntimeError', '(', "'It appears that IgPhyML is not installed.\\nPlease install and try again.'", ')', '# first, tree topology is estimated with the M0/GY94 model', 'igphyml_cmd1', '=', "'igphyml -i {} -m GY -w M0 -t e --run_id gy94'", '.', 'format', '(', 'input_file', ')', 'p1', '=', 'sp', '.', 'Popen', '(', 'igphyml_cmd1', ',', 'stdout', '=', 'sp', '.', 'PIPE', ',', 'stderr', '=', 'sp', '.', 'PIPE', ',', 'shell', '=', 'True', ')', 'stdout1', ',', 'stderr1', '=', 'p1', '.', 'communicate', '(', ')', 'if', 'verbose', ':', 'print', '(', 'stdout1', '+', "'\\n'", ')', 'print', '(', 'stderr1', '+', "'\\n\\n'", ')', 'intermediate', '=', 'input_file', '+', "'_igphyml_tree.txt_gy94'", '# now we fit the HLP17 model once the tree topology is fixed', 'igphyml_cmd2', '=', "'igphyml -i {0} -m HLP17 --root {1} -o lr -u {2} -o {3}'", '.', 'format', '(', 'input_file', ',', 'root', ',', 'intermediate', ',', 'tree_file', ')', 'p2', '=', 'sp', '.', 'Popen', '(', 'igphyml_cmd2', ',', 'stdout', '=', 'sp', '.', 'PIPE', ',', 'stderr', '=', 'sp', '.', 'PIPE', ',', 'shell', '=', 'True', ')', 'stdout2', ',', 'stderr2', '=', 'p2', '.', 'communicate', '(', ')', 'if', 'verbose', ':', 'print', '(', 'stdout2', '+', "'\\n'", ')', 'print', '(', 'stderr2', '+', "'\\n'", ')', 'return', 'tree_file', '+', "'_igphyml_tree.txt'"]
Computes a phylogenetic tree using IgPhyML. .. note:: IgPhyML must be installed. It can be downloaded from https://github.com/kbhoehn/IgPhyML. Args: input_file (str): Path to a Phylip-formatted multiple sequence alignment. Required. tree_file (str): Path to the output tree file. root (str): Name of the root sequence. Required. verbose (bool): If `True`, prints the standard output and standard error for each IgPhyML run. Default is `False`.
['Computes', 'a', 'phylogenetic', 'tree', 'using', 'IgPhyML', '.']
train
https://github.com/briney/abutils/blob/944755fc7d28bfc7d4f1ffad94ca0bf9d74ec54b/abutils/utils/phylogeny.py#L452-L493
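A hypothetical invocation of the function above; the file names and root sequence name are placeholders, and the igphyml binary must be on PATH (the import path is taken from the record).

from abutils.utils.phylogeny import igphyml

tree = igphyml(
    input_file='clonal_family.phy',   # Phylip-formatted alignment (placeholder name)
    tree_file='clonal_family_hlp17',  # basename for the HLP17 output tree
    root='germline',                  # name of the root (germline) sequence
    verbose=True,
)
print(tree)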
1,265
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/connection_plugins/fireball.py
Connection.exec_command
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the remote host ''' vvv("EXEC COMMAND %s" % cmd) if self.runner.sudo and sudoable: raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).") data = dict( mode='command', cmd=cmd, tmp_path=tmp_path, executable=executable, ) data = utils.jsonify(data) data = utils.encrypt(self.key, data) self.socket.send(data) response = self.socket.recv() response = utils.decrypt(self.key, response) response = utils.parse_json(response) return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
python
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'): ''' run a command on the remote host ''' vvv("EXEC COMMAND %s" % cmd) if self.runner.sudo and sudoable: raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).") data = dict( mode='command', cmd=cmd, tmp_path=tmp_path, executable=executable, ) data = utils.jsonify(data) data = utils.encrypt(self.key, data) self.socket.send(data) response = self.socket.recv() response = utils.decrypt(self.key, response) response = utils.parse_json(response) return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
['def', 'exec_command', '(', 'self', ',', 'cmd', ',', 'tmp_path', ',', 'sudo_user', ',', 'sudoable', '=', 'False', ',', 'executable', '=', "'/bin/sh'", ')', ':', 'vvv', '(', '"EXEC COMMAND %s"', '%', 'cmd', ')', 'if', 'self', '.', 'runner', '.', 'sudo', 'and', 'sudoable', ':', 'raise', 'errors', '.', 'AnsibleError', '(', '"fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo)."', ')', 'data', '=', 'dict', '(', 'mode', '=', "'command'", ',', 'cmd', '=', 'cmd', ',', 'tmp_path', '=', 'tmp_path', ',', 'executable', '=', 'executable', ',', ')', 'data', '=', 'utils', '.', 'jsonify', '(', 'data', ')', 'data', '=', 'utils', '.', 'encrypt', '(', 'self', '.', 'key', ',', 'data', ')', 'self', '.', 'socket', '.', 'send', '(', 'data', ')', 'response', '=', 'self', '.', 'socket', '.', 'recv', '(', ')', 'response', '=', 'utils', '.', 'decrypt', '(', 'self', '.', 'key', ',', 'response', ')', 'response', '=', 'utils', '.', 'parse_json', '(', 'response', ')', 'return', '(', 'response', '.', 'get', '(', "'rc'", ',', 'None', ')', ',', "''", ',', 'response', '.', 'get', '(', "'stdout'", ',', "''", ')', ',', 'response', '.', 'get', '(', "'stderr'", ',', "''", ')', ')']
run a command on the remote host
['run', 'a', 'command', 'on', 'the', 'remote', 'host']
train
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/connection_plugins/fireball.py#L70-L92
1,266
ssato/python-anyconfig
src/anyconfig/cli.py
_try_dump
def _try_dump(cnf, outpath, otype, fmsg, extra_opts=None): """ :param cnf: Configuration object to print out :param outpath: Output file path or None :param otype: Output type or None :param fmsg: message if it cannot detect otype by 'outpath' :param extra_opts: Map object will be given to API.dump as extra options """ if extra_opts is None: extra_opts = {} try: API.dump(cnf, outpath, otype, **extra_opts) except API.UnknownFileTypeError: _exit_with_output(fmsg % outpath, 1) except API.UnknownProcessorTypeError: _exit_with_output("Invalid output type '%s'" % otype, 1)
python
def _try_dump(cnf, outpath, otype, fmsg, extra_opts=None): """ :param cnf: Configuration object to print out :param outpath: Output file path or None :param otype: Output type or None :param fmsg: message if it cannot detect otype by 'outpath' :param extra_opts: Map object will be given to API.dump as extra options """ if extra_opts is None: extra_opts = {} try: API.dump(cnf, outpath, otype, **extra_opts) except API.UnknownFileTypeError: _exit_with_output(fmsg % outpath, 1) except API.UnknownProcessorTypeError: _exit_with_output("Invalid output type '%s'" % otype, 1)
['def', '_try_dump', '(', 'cnf', ',', 'outpath', ',', 'otype', ',', 'fmsg', ',', 'extra_opts', '=', 'None', ')', ':', 'if', 'extra_opts', 'is', 'None', ':', 'extra_opts', '=', '{', '}', 'try', ':', 'API', '.', 'dump', '(', 'cnf', ',', 'outpath', ',', 'otype', ',', '*', '*', 'extra_opts', ')', 'except', 'API', '.', 'UnknownFileTypeError', ':', '_exit_with_output', '(', 'fmsg', '%', 'outpath', ',', '1', ')', 'except', 'API', '.', 'UnknownProcessorTypeError', ':', '_exit_with_output', '(', '"Invalid output type \'%s\'"', '%', 'otype', ',', '1', ')']
:param cnf: Configuration object to print out :param outpath: Output file path or None :param otype: Output type or None :param fmsg: message if it cannot detect otype by 'outpath' :param extra_opts: Map object will be given to API.dump as extra options
[':', 'param', 'cnf', ':', 'Configuration', 'object', 'to', 'print', 'out', ':', 'param', 'outpath', ':', 'Output', 'file', 'path', 'or', 'None', ':', 'param', 'otype', ':', 'Output', 'type', 'or', 'None', ':', 'param', 'fmsg', ':', 'message', 'if', 'it', 'cannot', 'detect', 'otype', 'by', 'outpath', ':', 'param', 'extra_opts', ':', 'Map', 'object', 'will', 'be', 'given', 'to', 'API', '.', 'dump', 'as', 'extra', 'options']
train
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/cli.py#L291-L306
1,267
mishbahr/djangocms-forms
djangocms_forms/admin.py
FormSubmissionAdmin.render_export_form
def render_export_form(self, request, context, form_url=''): """ Render the form submission export form. """ context.update({ 'has_change_permission': self.has_change_permission(request), 'form_url': mark_safe(form_url), 'opts': self.opts, 'add': True, 'save_on_top': self.save_on_top, }) return TemplateResponse(request, self.export_form_template, context)
python
def render_export_form(self, request, context, form_url=''): """ Render the form submission export form. """ context.update({ 'has_change_permission': self.has_change_permission(request), 'form_url': mark_safe(form_url), 'opts': self.opts, 'add': True, 'save_on_top': self.save_on_top, }) return TemplateResponse(request, self.export_form_template, context)
['def', 'render_export_form', '(', 'self', ',', 'request', ',', 'context', ',', 'form_url', '=', "''", ')', ':', 'context', '.', 'update', '(', '{', "'has_change_permission'", ':', 'self', '.', 'has_change_permission', '(', 'request', ')', ',', "'form_url'", ':', 'mark_safe', '(', 'form_url', ')', ',', "'opts'", ':', 'self', '.', 'opts', ',', "'add'", ':', 'True', ',', "'save_on_top'", ':', 'self', '.', 'save_on_top', ',', '}', ')', 'return', 'TemplateResponse', '(', 'request', ',', 'self', '.', 'export_form_template', ',', 'context', ')']
Render the form submission export form.
['Render', 'the', 'form', 'submission', 'export', 'form', '.']
train
https://github.com/mishbahr/djangocms-forms/blob/9d7a4ef9769fd5e1526921c084d6da7b8070a2c1/djangocms_forms/admin.py#L260-L272
1,268
geopy/geopy
geopy/units.py
feet
def feet(kilometers=0, meters=0, miles=0, nautical=0): """ Convert distances given in kilometers, meters, miles or nautical miles to feet. """ ret = 0. if nautical: kilometers += nautical / nm(1.) if meters: kilometers += meters / 1000. if kilometers: miles += mi(kilometers=kilometers) ret += miles * 5280 return ret
python
def feet(kilometers=0, meters=0, miles=0, nautical=0): """ Convert distances given in kilometers, meters, miles or nautical miles to feet. """ ret = 0. if nautical: kilometers += nautical / nm(1.) if meters: kilometers += meters / 1000. if kilometers: miles += mi(kilometers=kilometers) ret += miles * 5280 return ret
['def', 'feet', '(', 'kilometers', '=', '0', ',', 'meters', '=', '0', ',', 'miles', '=', '0', ',', 'nautical', '=', '0', ')', ':', 'ret', '=', '0.', 'if', 'nautical', ':', 'kilometers', '+=', 'nautical', '/', 'nm', '(', '1.', ')', 'if', 'meters', ':', 'kilometers', '+=', 'meters', '/', '1000.', 'if', 'kilometers', ':', 'miles', '+=', 'mi', '(', 'kilometers', '=', 'kilometers', ')', 'ret', '+=', 'miles', '*', '5280', 'return', 'ret']
Convert distances given in kilometers, meters, miles or nautical miles to feet.
['Convert', 'distances', 'given', 'in', 'kilometers', 'meters', 'miles', 'or', 'nautical', 'miles', 'to', 'feet', '.']
train
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/units.py#L96-L108
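A quick numeric check of the conversion above, using the record's own module (values rounded; 1 km ≈ 0.621371 mi and 1 mi = 5280 ft).

from geopy import units

print(units.feet(kilometers=1))         # ≈ 3280.84
print(units.feet(meters=500, miles=1))  # 5280 ft + ≈1640.42 ft ≈ 6920.42
print(units.feet(nautical=1))           # 1 nmi = 1.852 km ≈ 6076.12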
1,269
openstack/pyghmi
pyghmi/ipmi/command.py
Command.get_bootdev
def get_bootdev(self): """Get current boot device override information. Provides the current requested boot device. Be aware that not all IPMI devices support this. Even in BMCs that claim to, occasionally the BIOS or UEFI fail to honor it. This is usually only applicable to the next reboot. :raises: IpmiException on an error. :returns: dict --The response will be provided in the return as a dict """ response = self.raw_command(netfn=0, command=9, data=(5, 0, 0)) # interpret response per 'get system boot options' if 'error' in response: raise exc.IpmiException(response['error']) # this should only be invoked for get system boot option complying to # ipmi spec and targeting the 'boot flags' parameter assert (response['command'] == 9 and response['netfn'] == 1 and response['data'][0] == 1 and (response['data'][1] & 0b1111111) == 5) if (response['data'][1] & 0b10000000 or not response['data'][2] & 0b10000000): return {'bootdev': 'default', 'persistent': True} else: # will consult data2 of the boot flags parameter for the data persistent = False uefimode = False if response['data'][2] & 0b1000000: persistent = True if response['data'][2] & 0b100000: uefimode = True bootnum = (response['data'][3] & 0b111100) >> 2 bootdev = boot_devices.get(bootnum) if bootdev: return {'bootdev': bootdev, 'persistent': persistent, 'uefimode': uefimode} else: return {'bootdev': bootnum, 'persistent': persistent, 'uefimode': uefimode}
python
def get_bootdev(self): """Get current boot device override information. Provides the current requested boot device. Be aware that not all IPMI devices support this. Even in BMCs that claim to, occasionally the BIOS or UEFI fail to honor it. This is usually only applicable to the next reboot. :raises: IpmiException on an error. :returns: dict --The response will be provided in the return as a dict """ response = self.raw_command(netfn=0, command=9, data=(5, 0, 0)) # interpret response per 'get system boot options' if 'error' in response: raise exc.IpmiException(response['error']) # this should only be invoked for get system boot option complying to # ipmi spec and targeting the 'boot flags' parameter assert (response['command'] == 9 and response['netfn'] == 1 and response['data'][0] == 1 and (response['data'][1] & 0b1111111) == 5) if (response['data'][1] & 0b10000000 or not response['data'][2] & 0b10000000): return {'bootdev': 'default', 'persistent': True} else: # will consult data2 of the boot flags parameter for the data persistent = False uefimode = False if response['data'][2] & 0b1000000: persistent = True if response['data'][2] & 0b100000: uefimode = True bootnum = (response['data'][3] & 0b111100) >> 2 bootdev = boot_devices.get(bootnum) if bootdev: return {'bootdev': bootdev, 'persistent': persistent, 'uefimode': uefimode} else: return {'bootdev': bootnum, 'persistent': persistent, 'uefimode': uefimode}
['def', 'get_bootdev', '(', 'self', ')', ':', 'response', '=', 'self', '.', 'raw_command', '(', 'netfn', '=', '0', ',', 'command', '=', '9', ',', 'data', '=', '(', '5', ',', '0', ',', '0', ')', ')', "# interpret response per 'get system boot options'", 'if', "'error'", 'in', 'response', ':', 'raise', 'exc', '.', 'IpmiException', '(', 'response', '[', "'error'", ']', ')', '# this should only be invoked for get system boot option complying to', "# ipmi spec and targeting the 'boot flags' parameter", 'assert', '(', 'response', '[', "'command'", ']', '==', '9', 'and', 'response', '[', "'netfn'", ']', '==', '1', 'and', 'response', '[', "'data'", ']', '[', '0', ']', '==', '1', 'and', '(', 'response', '[', "'data'", ']', '[', '1', ']', '&', '0b1111111', ')', '==', '5', ')', 'if', '(', 'response', '[', "'data'", ']', '[', '1', ']', '&', '0b10000000', 'or', 'not', 'response', '[', "'data'", ']', '[', '2', ']', '&', '0b10000000', ')', ':', 'return', '{', "'bootdev'", ':', "'default'", ',', "'persistent'", ':', 'True', '}', 'else', ':', '# will consult data2 of the boot flags parameter for the data', 'persistent', '=', 'False', 'uefimode', '=', 'False', 'if', 'response', '[', "'data'", ']', '[', '2', ']', '&', '0b1000000', ':', 'persistent', '=', 'True', 'if', 'response', '[', "'data'", ']', '[', '2', ']', '&', '0b100000', ':', 'uefimode', '=', 'True', 'bootnum', '=', '(', 'response', '[', "'data'", ']', '[', '3', ']', '&', '0b111100', ')', '>>', '2', 'bootdev', '=', 'boot_devices', '.', 'get', '(', 'bootnum', ')', 'if', 'bootdev', ':', 'return', '{', "'bootdev'", ':', 'bootdev', ',', "'persistent'", ':', 'persistent', ',', "'uefimode'", ':', 'uefimode', '}', 'else', ':', 'return', '{', "'bootdev'", ':', 'bootnum', ',', "'persistent'", ':', 'persistent', ',', "'uefimode'", ':', 'uefimode', '}']
Get current boot device override information. Provides the current requested boot device. Be aware that not all IPMI devices support this. Even in BMCs that claim to, occasionally the BIOS or UEFI fail to honor it. This is usually only applicable to the next reboot. :raises: IpmiException on an error. :returns: dict --The response will be provided in the return as a dict
['Get', 'current', 'boot', 'device', 'override', 'information', '.']
train
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/command.py#L241-L281
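A standalone sketch of the bit tests above, decoding sample data2/data3 bytes of the IPMI boot-flags parameter; the sample values and the trimmed boot_devices map are made up for illustration, and the parameter-invalid check on data1 is omitted.

# illustrative subset of a selector -> name mapping, not pyghmi's full table
boot_devices = {1: 'network', 2: 'hd', 5: 'optical', 15: 'floppy'}

data2, data3 = 0b11100000, 0b00000100  # flags valid, persistent, UEFI, selector = 1
if not data2 & 0b10000000:
    # bit 7 clear means the override flags are not valid
    print({'bootdev': 'default', 'persistent': True})
else:
    persistent = bool(data2 & 0b1000000)       # bit 6: persist across reboots
    uefimode = bool(data2 & 0b100000)          # bit 5: boot in UEFI mode
    bootnum = (data3 & 0b111100) >> 2          # bits 5:2: boot device selector
    print({'bootdev': boot_devices.get(bootnum, bootnum),
           'persistent': persistent, 'uefimode': uefimode})
# -> {'bootdev': 'network', 'persistent': True, 'uefimode': True}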
1,270
tensorflow/tensor2tensor
tensor2tensor/rl/rl_utils.py
random_rollout_subsequences
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length): """Chooses a random frame sequence of given length from a set of rollouts.""" def choose_subsequence(): # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over # frames and not rollouts. rollout = random.choice(rollouts) try: from_index = random.randrange(len(rollout) - subsequence_length + 1) except ValueError: # Rollout too short; repeat. return choose_subsequence() return rollout[from_index:(from_index + subsequence_length)] return [choose_subsequence() for _ in range(num_subsequences)]
python
def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length): """Chooses a random frame sequence of given length from a set of rollouts.""" def choose_subsequence(): # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over # frames and not rollouts. rollout = random.choice(rollouts) try: from_index = random.randrange(len(rollout) - subsequence_length + 1) except ValueError: # Rollout too short; repeat. return choose_subsequence() return rollout[from_index:(from_index + subsequence_length)] return [choose_subsequence() for _ in range(num_subsequences)]
['def', 'random_rollout_subsequences', '(', 'rollouts', ',', 'num_subsequences', ',', 'subsequence_length', ')', ':', 'def', 'choose_subsequence', '(', ')', ':', '# TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over', '# frames and not rollouts.', 'rollout', '=', 'random', '.', 'choice', '(', 'rollouts', ')', 'try', ':', 'from_index', '=', 'random', '.', 'randrange', '(', 'len', '(', 'rollout', ')', '-', 'subsequence_length', '+', '1', ')', 'except', 'ValueError', ':', '# Rollout too short; repeat.', 'return', 'choose_subsequence', '(', ')', 'return', 'rollout', '[', 'from_index', ':', '(', 'from_index', '+', 'subsequence_length', ')', ']', 'return', '[', 'choose_subsequence', '(', ')', 'for', '_', 'in', 'range', '(', 'num_subsequences', ')', ']']
Chooses a random frame sequence of given length from a set of rollouts.
['Chooses', 'a', 'random', 'frame', 'sequence', 'of', 'given', 'length', 'from', 'a', 'set', 'of', 'rollouts', '.']
train
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/rl_utils.py#L323-L336
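A small demonstration, assuming random_rollout_subsequences from the record is in scope; plain lists stand in for rollouts here, since the function only needs len() and slicing. Note how the too-short rollout is silently retried.

import random

random.seed(0)
rollouts = [list(range(10)), list(range(3))]  # the 3-step rollout is too short for length 4
subs = random_rollout_subsequences(rollouts, num_subsequences=2, subsequence_length=4)
for s in subs:
    print(s)  # each is a contiguous length-4 slice of the 10-step rollout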
1,271
osrg/ryu
ryu/lib/lacplib.py
LacpLib._add_flow_v1_2
def _add_flow_v1_2(self, src, port, timeout, datapath): """Enter a flow entry for packets from the slave i/f with idle_timeout. For OpenFlow ver1.2 and ver1.3.""" ofproto = datapath.ofproto parser = datapath.ofproto_parser match = parser.OFPMatch( in_port=port, eth_src=src, eth_type=ether.ETH_TYPE_SLOW) actions = [parser.OFPActionOutput( ofproto.OFPP_CONTROLLER, ofproto.OFPCML_MAX)] inst = [parser.OFPInstructionActions( ofproto.OFPIT_APPLY_ACTIONS, actions)] mod = parser.OFPFlowMod( datapath=datapath, command=ofproto.OFPFC_ADD, idle_timeout=timeout, priority=65535, flags=ofproto.OFPFF_SEND_FLOW_REM, match=match, instructions=inst) datapath.send_msg(mod)
python
def _add_flow_v1_2(self, src, port, timeout, datapath): """Enter a flow entry for packets from the slave i/f with idle_timeout. For OpenFlow ver1.2 and ver1.3.""" ofproto = datapath.ofproto parser = datapath.ofproto_parser match = parser.OFPMatch( in_port=port, eth_src=src, eth_type=ether.ETH_TYPE_SLOW) actions = [parser.OFPActionOutput( ofproto.OFPP_CONTROLLER, ofproto.OFPCML_MAX)] inst = [parser.OFPInstructionActions( ofproto.OFPIT_APPLY_ACTIONS, actions)] mod = parser.OFPFlowMod( datapath=datapath, command=ofproto.OFPFC_ADD, idle_timeout=timeout, priority=65535, flags=ofproto.OFPFF_SEND_FLOW_REM, match=match, instructions=inst) datapath.send_msg(mod)
['def', '_add_flow_v1_2', '(', 'self', ',', 'src', ',', 'port', ',', 'timeout', ',', 'datapath', ')', ':', 'ofproto', '=', 'datapath', '.', 'ofproto', 'parser', '=', 'datapath', '.', 'ofproto_parser', 'match', '=', 'parser', '.', 'OFPMatch', '(', 'in_port', '=', 'port', ',', 'eth_src', '=', 'src', ',', 'eth_type', '=', 'ether', '.', 'ETH_TYPE_SLOW', ')', 'actions', '=', '[', 'parser', '.', 'OFPActionOutput', '(', 'ofproto', '.', 'OFPP_CONTROLLER', ',', 'ofproto', '.', 'OFPCML_MAX', ')', ']', 'inst', '=', '[', 'parser', '.', 'OFPInstructionActions', '(', 'ofproto', '.', 'OFPIT_APPLY_ACTIONS', ',', 'actions', ')', ']', 'mod', '=', 'parser', '.', 'OFPFlowMod', '(', 'datapath', '=', 'datapath', ',', 'command', '=', 'ofproto', '.', 'OFPFC_ADD', ',', 'idle_timeout', '=', 'timeout', ',', 'priority', '=', '65535', ',', 'flags', '=', 'ofproto', '.', 'OFPFF_SEND_FLOW_REM', ',', 'match', '=', 'match', ',', 'instructions', '=', 'inst', ')', 'datapath', '.', 'send_msg', '(', 'mod', ')']
Enter a flow entry for packets from the slave i/f with idle_timeout. For OpenFlow ver1.2 and ver1.3.
['Enter', 'a', 'flow', 'entry', 'for', 'packets', 'from', 'the', 'slave', 'i', '/', 'f', 'with', 'idle_timeout', '.', 'For', 'OpenFlow', 'ver1', '.', '2', 'and', 'ver1', '.', '3', '.']
train
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/lacplib.py#L305-L322
1,272
cocaine/cocaine-tools
cocaine/tools/dispatch.py
keyring_remove
def keyring_remove(key, yes, **kwargs): """ Removes a public key from the keyring. Does nothing if the key is not in the keyring. If no key is specified, the entire keyring is cleared. To force the cocaine-runtime to refresh its keyring, call the `refresh` method. """ if key is None: if not yes: click.confirm('Are you sure you want to remove all keys?', abort=True) ctx = Context(**kwargs) ctx.execute_action('keyring:remove', **{ 'key': key, 'storage': ctx.repo.create_secure_service('storage'), })
python
def keyring_remove(key, yes, **kwargs): """ Removes a public key from the keyring. Does nothing if the key is not in the keyring. If no key is specified, the entire keyring is cleared. To force the cocaine-runtime to refresh its keyring, call the `refresh` method. """ if key is None: if not yes: click.confirm('Are you sure you want to remove all keys?', abort=True) ctx = Context(**kwargs) ctx.execute_action('keyring:remove', **{ 'key': key, 'storage': ctx.repo.create_secure_service('storage'), })
['def', 'keyring_remove', '(', 'key', ',', 'yes', ',', '*', '*', 'kwargs', ')', ':', 'if', 'key', 'is', 'None', ':', 'if', 'not', 'yes', ':', 'click', '.', 'confirm', '(', "'Are you sure you want to remove all keys?'", ',', 'abort', '=', 'True', ')', 'ctx', '=', 'Context', '(', '*', '*', 'kwargs', ')', 'ctx', '.', 'execute_action', '(', "'keyring:remove'", ',', '*', '*', '{', "'key'", ':', 'key', ',', "'storage'", ':', 'ctx', '.', 'repo', '.', 'create_secure_service', '(', "'storage'", ')', ',', '}', ')']
Removes a public key from the keyring. Does nothing if the key is not in the keyring. If no key is specified, the entire keyring is cleared. To force the cocaine-runtime to refresh its keyring, call the `refresh` method.
['Removes', 'a', 'public', 'key', 'from', 'the', 'keyring', '.']
train
https://github.com/cocaine/cocaine-tools/blob/d8834f8e04ca42817d5f4e368d471484d4b3419f/cocaine/tools/dispatch.py#L1998-L2013
1,273
PetrochukM/PyTorch-NLP
torchnlp/encoders/text/subword_text_tokenizer.py
SubwordTextTokenizer.build_to_target_size_from_token_counts
def build_to_target_size_from_token_counts(cls, target_size, token_counts, min_val, max_val, num_iterations=4): """Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`. Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: target_size: Desired vocab_size to approximate. token_counts: A dictionary of token counts, mapping string to int. min_val: An integer; lower bound for the minimum token count. max_val: An integer; upper bound for the minimum token count. num_iterations: An integer; how many iterations of refinement. Returns: A SubwordTextTokenizer instance. Raises: ValueError: If `min_val` is greater than `max_val`. """ if min_val > max_val: raise ValueError("Lower bound for the minimum token count " "is greater than the upper bound.") def bisect(min_val, max_val): """Bisection to find the right size.""" present_count = (max_val + min_val) // 2 logger.info("Trying min_count %d" % present_count) subtokenizer = cls() subtokenizer.build_from_token_counts(token_counts, present_count, num_iterations) logger.info("min_count %d attained a %d vocab_size", present_count, subtokenizer.vocab_size) # If min_val == max_val, we can't do any better than this. if subtokenizer.vocab_size == target_size or min_val >= max_val: return subtokenizer if subtokenizer.vocab_size > target_size: other_subtokenizer = bisect(present_count + 1, max_val) else: other_subtokenizer = bisect(min_val, present_count - 1) if other_subtokenizer is None: return subtokenizer if (abs(other_subtokenizer.vocab_size - target_size) < abs(subtokenizer.vocab_size - target_size)): return other_subtokenizer return subtokenizer return bisect(min_val, max_val)
python
def build_to_target_size_from_token_counts(cls, target_size, token_counts, min_val, max_val, num_iterations=4): """Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`. Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: target_size: Desired vocab_size to approximate. token_counts: A dictionary of token counts, mapping string to int. min_val: An integer; lower bound for the minimum token count. max_val: An integer; upper bound for the minimum token count. num_iterations: An integer; how many iterations of refinement. Returns: A SubwordTextTokenizer instance. Raises: ValueError: If `min_val` is greater than `max_val`. """ if min_val > max_val: raise ValueError("Lower bound for the minimum token count " "is greater than the upper bound.") def bisect(min_val, max_val): """Bisection to find the right size.""" present_count = (max_val + min_val) // 2 logger.info("Trying min_count %d" % present_count) subtokenizer = cls() subtokenizer.build_from_token_counts(token_counts, present_count, num_iterations) logger.info("min_count %d attained a %d vocab_size", present_count, subtokenizer.vocab_size) # If min_val == max_val, we can't do any better than this. if subtokenizer.vocab_size == target_size or min_val >= max_val: return subtokenizer if subtokenizer.vocab_size > target_size: other_subtokenizer = bisect(present_count + 1, max_val) else: other_subtokenizer = bisect(min_val, present_count - 1) if other_subtokenizer is None: return subtokenizer if (abs(other_subtokenizer.vocab_size - target_size) < abs(subtokenizer.vocab_size - target_size)): return other_subtokenizer return subtokenizer return bisect(min_val, max_val)
['def', 'build_to_target_size_from_token_counts', '(', 'cls', ',', 'target_size', ',', 'token_counts', ',', 'min_val', ',', 'max_val', ',', 'num_iterations', '=', '4', ')', ':', 'if', 'min_val', '>', 'max_val', ':', 'raise', 'ValueError', '(', '"Lower bound for the minimum token count "', '"is greater than the upper bound."', ')', 'def', 'bisect', '(', 'min_val', ',', 'max_val', ')', ':', '"""Bisection to find the right size."""', 'present_count', '=', '(', 'max_val', '+', 'min_val', ')', '//', '2', 'logger', '.', 'info', '(', '"Trying min_count %d"', '%', 'present_count', ')', 'subtokenizer', '=', 'cls', '(', ')', 'subtokenizer', '.', 'build_from_token_counts', '(', 'token_counts', ',', 'present_count', ',', 'num_iterations', ')', 'logger', '.', 'info', '(', '"min_count %d attained a %d vocab_size"', ',', 'present_count', ',', 'subtokenizer', '.', 'vocab_size', ')', "# If min_val == max_val, we can't do any better than this.", 'if', 'subtokenizer', '.', 'vocab_size', '==', 'target_size', 'or', 'min_val', '>=', 'max_val', ':', 'return', 'subtokenizer', 'if', 'subtokenizer', '.', 'vocab_size', '>', 'target_size', ':', 'other_subtokenizer', '=', 'bisect', '(', 'present_count', '+', '1', ',', 'max_val', ')', 'else', ':', 'other_subtokenizer', '=', 'bisect', '(', 'min_val', ',', 'present_count', '-', '1', ')', 'if', 'other_subtokenizer', 'is', 'None', ':', 'return', 'subtokenizer', 'if', '(', 'abs', '(', 'other_subtokenizer', '.', 'vocab_size', '-', 'target_size', ')', '<', 'abs', '(', 'subtokenizer', '.', 'vocab_size', '-', 'target_size', ')', ')', ':', 'return', 'other_subtokenizer', 'return', 'subtokenizer', 'return', 'bisect', '(', 'min_val', ',', 'max_val', ')']
Builds a SubwordTextTokenizer that has `vocab_size` near `target_size`. Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: target_size: Desired vocab_size to approximate. token_counts: A dictionary of token counts, mapping string to int. min_val: An integer; lower bound for the minimum token count. max_val: An integer; upper bound for the minimum token count. num_iterations: An integer; how many iterations of refinement. Returns: A SubwordTextTokenizer instance. Raises: ValueError: If `min_val` is greater than `max_val`.
['Builds', 'a', 'SubwordTextTokenizer', 'that', 'has', 'vocab_size', 'near', 'target_size', '.']
train
https://github.com/PetrochukM/PyTorch-NLP/blob/5f7320da5c8d781df072fab3f7e421c6347e5bfa/torchnlp/encoders/text/subword_text_tokenizer.py#L280-L334
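A self-contained sketch of the same idea in iterative form: vocabulary size shrinks monotonically as min_count grows, so min_count can be binary-searched toward a target size. The toy counts below are made up, not SubwordTextTokenizer's real statistics.

def toy_vocab_size(min_count, counts):
    # number of tokens that survive the frequency threshold
    return sum(1 for c in counts.values() if c >= min_count)

def bisect_min_count(target, counts, lo, hi):
    best = None
    while lo <= hi:
        mid = (lo + hi) // 2
        size = toy_vocab_size(mid, counts)
        if best is None or abs(size - target) < abs(best[1] - target):
            best = (mid, size)  # keep the closest size seen so far
        if size > target:
            lo = mid + 1        # raising min_count shrinks the vocab
        else:
            hi = mid - 1
    return best

counts = {'a': 50, 'b': 20, 'c': 20, 'd': 5, 'e': 1}
print(bisect_min_count(3, counts, 1, 50))  # -> (12, 3): min_count 12 keeps a, b, c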
1,274
swift-nav/libsbp
generator/sbpg/targets/protobuf.py
to_identifier
def to_identifier(s): """ Convert snake_case to CamelCase. """ if s.startswith('GPS'): s = 'Gps' + s[3:] return ''.join([i.capitalize() for i in s.split('_')]) if '_' in s else s
python
def to_identifier(s): """ Convert snake_case to CamelCase. """ if s.startswith('GPS'): s = 'Gps' + s[3:] return ''.join([i.capitalize() for i in s.split('_')]) if '_' in s else s
['def', 'to_identifier', '(', 's', ')', ':', 'if', 's', '.', 'startswith', '(', "'GPS'", ')', ':', 's', '=', "'Gps'", '+', 's', '[', '3', ':', ']', 'return', "''", '.', 'join', '(', '[', 'i', '.', 'capitalize', '(', ')', 'for', 'i', 'in', 's', '.', 'split', '(', "'_'", ')', ']', ')', 'if', "'_'", 'in', 's', 'else', 's']
Convert snake_case to CamelCase.
['Convert', 'snake_case', 'to', 'CamelCase', '.']
train
https://github.com/swift-nav/libsbp/blob/5a950608506b23e31b73ef7065da905b646055c1/generator/sbpg/targets/protobuf.py#L50-L56
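Behavior checks for the conversion above, assuming to_identifier from the record is in scope.

assert to_identifier('msg_pos_llh') == 'MsgPosLlh'
assert to_identifier('GPS_TIME') == 'GpsTime'   # 'GPS' prefix is special-cased first
assert to_identifier('GPSTime') == 'GpsTime'    # prefix rewrite applies even without '_'
assert to_identifier('already') == 'already'    # no underscore: returned unchanged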
1,275
peterwittek/ncpol2sdpa
ncpol2sdpa/physics_utils.py
define_objective_with_I
def define_objective_with_I(I, *args): """Define a polynomial using measurements and an I matrix describing a Bell inequality. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param args: Either the measurements of Alice and Bob or a `Probability` class describing their measurement operators. :type args: tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator` or :class:`ncpol2sdpa.Probability` :returns: :class:`sympy.core.expr.Expr` -- the objective function to be solved as a minimization problem to find the maximum quantum violation. Note that the sign is flipped compared to the Bell inequality. """ objective = I[0][0] if len(args) > 2 or len(args) == 0: raise Exception("Wrong number of arguments!") elif len(args) == 1: A = args[0].parties[0] B = args[0].parties[1] else: A = args[0] B = args[1] i, j = 0, 1 # Row and column index in I for m_Bj in B: # Define first row for Bj in m_Bj: objective += I[i][j] * Bj j += 1 i += 1 for m_Ai in A: for Ai in m_Ai: objective += I[i][0] * Ai j = 1 for m_Bj in B: for Bj in m_Bj: objective += I[i][j] * Ai * Bj j += 1 i += 1 return -objective
python
def define_objective_with_I(I, *args): """Define a polynomial using measurements and an I matrix describing a Bell inequality. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param args: Either the measurements of Alice and Bob or a `Probability` class describing their measurement operators. :type args: tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator` or :class:`ncpol2sdpa.Probability` :returns: :class:`sympy.core.expr.Expr` -- the objective function to be solved as a minimization problem to find the maximum quantum violation. Note that the sign is flipped compared to the Bell inequality. """ objective = I[0][0] if len(args) > 2 or len(args) == 0: raise Exception("Wrong number of arguments!") elif len(args) == 1: A = args[0].parties[0] B = args[0].parties[1] else: A = args[0] B = args[1] i, j = 0, 1 # Row and column index in I for m_Bj in B: # Define first row for Bj in m_Bj: objective += I[i][j] * Bj j += 1 i += 1 for m_Ai in A: for Ai in m_Ai: objective += I[i][0] * Ai j = 1 for m_Bj in B: for Bj in m_Bj: objective += I[i][j] * Ai * Bj j += 1 i += 1 return -objective
['def', 'define_objective_with_I', '(', 'I', ',', '*', 'args', ')', ':', 'objective', '=', 'I', '[', '0', ']', '[', '0', ']', 'if', 'len', '(', 'args', ')', '>', '2', 'or', 'len', '(', 'args', ')', '==', '0', ':', 'raise', 'Exception', '(', '"Wrong number of arguments!"', ')', 'elif', 'len', '(', 'args', ')', '==', '1', ':', 'A', '=', 'args', '[', '0', ']', '.', 'parties', '[', '0', ']', 'B', '=', 'args', '[', '0', ']', '.', 'parties', '[', '1', ']', 'else', ':', 'A', '=', 'args', '[', '0', ']', 'B', '=', 'args', '[', '1', ']', 'i', ',', 'j', '=', '0', ',', '1', '# Row and column index in I', 'for', 'm_Bj', 'in', 'B', ':', '# Define first row', 'for', 'Bj', 'in', 'm_Bj', ':', 'objective', '+=', 'I', '[', 'i', ']', '[', 'j', ']', '*', 'Bj', 'j', '+=', '1', 'i', '+=', '1', 'for', 'm_Ai', 'in', 'A', ':', 'for', 'Ai', 'in', 'm_Ai', ':', 'objective', '+=', 'I', '[', 'i', ']', '[', '0', ']', '*', 'Ai', 'j', '=', '1', 'for', 'm_Bj', 'in', 'B', ':', 'for', 'Bj', 'in', 'm_Bj', ':', 'objective', '+=', 'I', '[', 'i', ']', '[', 'j', ']', '*', 'Ai', '*', 'Bj', 'j', '+=', '1', 'i', '+=', '1', 'return', '-', 'objective']
Define a polynomial using measurements and an I matrix describing a Bell inequality. :param I: The I matrix of a Bell inequality in the Collins-Gisin notation. :type I: list of list of int. :param args: Either the measurements of Alice and Bob or a `Probability` class describing their measurement operators. :type args: tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator` or :class:`ncpol2sdpa.Probability` :returns: :class:`sympy.core.expr.Expr` -- the objective function to be solved as a minimization problem to find the maximum quantum violation. Note that the sign is flipped compared to the Bell inequality.
['Define', 'a', 'polynomial', 'using', 'measurements', 'and', 'an', 'I', 'matrix', 'describing', 'a', 'Bell', 'inequality', '.']
train
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/physics_utils.py#L219-L260
1,276
brainiak/brainiak
brainiak/funcalign/rsrm.py
RSRM._init_individual
def _init_individual(subjs, voxels, TRs): """Initializes the individual components `S_i` to empty (all zeros). Parameters ---------- subjs : int The number of subjects. voxels : list of int A list with the number of voxels per subject. TRs : int The number of timepoints in the data. Returns ------- S : list of 2D array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject initialized to zero. """ return [np.zeros((voxels[i], TRs)) for i in range(subjs)]
python
def _init_individual(subjs, voxels, TRs): """Initializes the individual components `S_i` to empty (all zeros). Parameters ---------- subjs : int The number of subjects. voxels : list of int A list with the number of voxels per subject. TRs : int The number of timepoints in the data. Returns ------- S : list of 2D array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject initialized to zero. """ return [np.zeros((voxels[i], TRs)) for i in range(subjs)]
['def', '_init_individual', '(', 'subjs', ',', 'voxels', ',', 'TRs', ')', ':', 'return', '[', 'np', '.', 'zeros', '(', '(', 'voxels', '[', 'i', ']', ',', 'TRs', ')', ')', 'for', 'i', 'in', 'range', '(', 'subjs', ')', ']']
Initializes the individual components `S_i` to empty (all zeros). Parameters ---------- subjs : int The number of subjects. voxels : list of int A list with the number of voxels per subject. TRs : int The number of timepoints in the data. Returns ------- S : list of 2D array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject initialized to zero.
['Initializes', 'the', 'individual', 'components', 'S_i', 'to', 'empty', '(', 'all', 'zeros', ')', '.']
train
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L420-L442
1,277
LuqueDaniel/pybooru
pybooru/api_danbooru.py
DanbooruApi_Mixin.post_copy_notes
def post_copy_notes(self, post_id, other_post_id): """Function to copy notes (requires login). Parameters: post_id (int): The id of the post to copy notes from. other_post_id (int): The id of the post to copy notes to. """ return self._get('posts/{0}/copy_notes.json'.format(post_id), {'other_post_id': other_post_id}, 'PUT', auth=True)
python
def post_copy_notes(self, post_id, other_post_id): """Function to copy notes (requires login). Parameters: post_id (int): The id of the post to copy notes from. other_post_id (int): The id of the post to copy notes to. """ return self._get('posts/{0}/copy_notes.json'.format(post_id), {'other_post_id': other_post_id}, 'PUT', auth=True)
['def', 'post_copy_notes', '(', 'self', ',', 'post_id', ',', 'other_post_id', ')', ':', 'return', 'self', '.', '_get', '(', "'posts/{0}/copy_notes.json'", '.', 'format', '(', 'post_id', ')', ',', '{', "'other_post_id'", ':', 'other_post_id', '}', ',', "'PUT'", ',', 'auth', '=', 'True', ')']
Function to copy notes (requires login). Parameters: post_id (int): The id of the post to copy notes from. other_post_id (int): The id of the post to copy notes to.
['Function', 'to', 'copy', 'notes', '(', 'requires', 'login', ')', '.']
train
https://github.com/LuqueDaniel/pybooru/blob/60cd5254684d293b308f0b11b8f4ac2dce101479/pybooru/api_danbooru.py#L92-L100
1,278
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/gcp_hub_client.py
GcpHubClient.Stop
def Stop(self): """Signals the worker threads to shut down and waits until they exit.""" self._shutdown = True self._new_updates.set() # Wake up the transmission thread. if self._main_thread is not None: self._main_thread.join() self._main_thread = None if self._transmission_thread is not None: self._transmission_thread.join() self._transmission_thread = None
python
def Stop(self): """Signals the worker threads to shut down and waits until they exit.""" self._shutdown = True self._new_updates.set() # Wake up the transmission thread. if self._main_thread is not None: self._main_thread.join() self._main_thread = None if self._transmission_thread is not None: self._transmission_thread.join() self._transmission_thread = None
['def', 'Stop', '(', 'self', ')', ':', 'self', '.', '_shutdown', '=', 'True', 'self', '.', '_new_updates', '.', 'set', '(', ')', '# Wake up the transmission thread.', 'if', 'self', '.', '_main_thread', 'is', 'not', 'None', ':', 'self', '.', '_main_thread', '.', 'join', '(', ')', 'self', '.', '_main_thread', '=', 'None', 'if', 'self', '.', '_transmission_thread', 'is', 'not', 'None', ':', 'self', '.', '_transmission_thread', '.', 'join', '(', ')', 'self', '.', '_transmission_thread', '=', 'None']
Signals the worker threads to shut down and waits until they exit.
['Signals', 'the', 'worker', 'threads', 'to', 'shut', 'down', 'and', 'waits', 'until', 'they', 'exit', '.']
train
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/gcp_hub_client.py#L229-L240
1,279
denniskempin/safetynet
safetynet.py
_TypecheckDecorator
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) else: raise TypeError()
python
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) else: raise TypeError()
['def', '_TypecheckDecorator', '(', 'subject', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'subject', 'is', 'None', ':', 'return', '_TypecheckDecoratorFactory', '(', 'kwargs', ')', 'elif', 'inspect', '.', 'isfunction', '(', 'subject', ')', 'or', 'inspect', '.', 'ismethod', '(', 'subject', ')', ':', 'return', '_TypecheckFunction', '(', 'subject', ',', '{', '}', ',', '2', ',', 'None', ')', 'else', ':', 'raise', 'TypeError', '(', ')']
Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator.
['Dispatches', 'type', 'checks', 'based', 'on', 'what', 'the', 'subject', 'is', '.']
train
https://github.com/denniskempin/safetynet/blob/fbcc4a112370fc20696f003d901114b4fe26d984/safetynet.py#L459-L470
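A generic, self-contained sketch of the bare-or-parameterized decorator dispatch shown above; the names and the no-op check are illustrative, not safetynet's real internals.

import functools
import inspect

def typecheck(subject=None, **options):
    if subject is None:
        # used as @typecheck(strict=True): return a decorator
        def factory(func):
            return typecheck(func, **options)
        return factory
    if inspect.isfunction(subject) or inspect.ismethod(subject):
        # used bare as @typecheck: wrap the function directly
        @functools.wraps(subject)
        def wrapper(*args, **kwargs):
            # a real implementation would validate arguments here
            return subject(*args, **kwargs)
        return wrapper
    raise TypeError('typecheck can only decorate functions or methods')

@typecheck
def add(a, b):
    return a + b

@typecheck(strict=True)
def mul(a, b):
    return a * b

print(add(2, 3), mul(2, 3))  # -> 5 6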
1,280
h2oai/datatable
datatable/xls.py
_parse_row
def _parse_row(rowvalues, rowtypes): """ Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty. """ n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges
python
def _parse_row(rowvalues, rowtypes): """ Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty. """ n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges
['def', '_parse_row', '(', 'rowvalues', ',', 'rowtypes', ')', ':', 'n', '=', 'len', '(', 'rowvalues', ')', 'assert', 'n', '==', 'len', '(', 'rowtypes', ')', 'if', 'not', 'n', ':', 'return', '[', ']', 'range_start', '=', 'None', 'ranges', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'n', ')', ':', 'ctype', '=', 'rowtypes', '[', 'i', ']', 'cval', '=', 'rowvalues', '[', 'i', ']', '# Check whether the cell is empty or not. If it is empty, and there is', '# an active range being tracked - terminate it. On the other hand, if', "# the cell is not empty and there isn't an active range, then start it.", 'if', 'ctype', '==', '0', 'or', 'ctype', '==', '6', 'or', '(', 'ctype', '==', '1', 'and', '(', 'cval', '==', '""', 'or', 'cval', '.', 'isspace', '(', ')', ')', ')', ':', 'if', 'range_start', 'is', 'not', 'None', ':', 'ranges', '.', 'append', '(', '(', 'range_start', ',', 'i', ')', ')', 'range_start', '=', 'None', 'else', ':', 'if', 'range_start', 'is', 'None', ':', 'range_start', '=', 'i', 'if', 'range_start', 'is', 'not', 'None', ':', 'ranges', '.', 'append', '(', '(', 'range_start', ',', 'n', ')', ')', 'return', 'ranges']
Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty.
['Scan', 'a', 'single', 'row', 'from', 'an', 'Excel', 'file', 'and', 'return', 'the', 'list', 'of', 'ranges', 'corresponding', 'to', 'each', 'consecutive', 'span', 'of', 'non', '-', 'empty', 'cells', 'in', 'this', 'row', '.', 'If', 'all', 'cells', 'are', 'empty', 'return', 'an', 'empty', 'list', '.', 'Each', 'range', 'in', 'the', 'list', 'is', 'a', 'tuple', 'of', 'the', 'form', '(', 'startcol', 'endcol', ')', '.']
train
https://github.com/h2oai/datatable/blob/dd5fba74d2ca85b66f82ae3c1e0b6ea2fd792564/datatable/xls.py#L106-L147
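Reproducing the docstring's example, assuming _parse_row from the record is in scope; the type codes follow xlrd's conventions (0 empty, 1 text, 2 number, 6 blank).

values = ['', 1.0, 23, 'foo', '', 'hello', '']
types  = [ 0,  2,   2,  1,     6,  1,       0]
print(_parse_row(values, types))  # -> [(1, 4), (5, 6)]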
1,281
fracpete/python-weka-wrapper3
python/weka/core/classes.py
Environment.variable_names
def variable_names(self): """ Returns the names of all environment variables. :return: the names of the variables :rtype: list """ result = [] names = javabridge.call(self.jobject, "getVariableNames", "()Ljava/util/Set;") for name in javabridge.iterate_collection(names): result.append(javabridge.to_string(name)) return result
python
def variable_names(self): """ Returns the names of all environment variables. :return: the names of the variables :rtype: list """ result = [] names = javabridge.call(self.jobject, "getVariableNames", "()Ljava/util/Set;") for name in javabridge.iterate_collection(names): result.append(javabridge.to_string(name)) return result
['def', 'variable_names', '(', 'self', ')', ':', 'result', '=', '[', ']', 'names', '=', 'javabridge', '.', 'call', '(', 'self', '.', 'jobject', ',', '"getVariableNames"', ',', '"()Ljava/util/Set;"', ')', 'for', 'name', 'in', 'javabridge', '.', 'iterate_collection', '(', 'names', ')', ':', 'result', '.', 'append', '(', 'javabridge', '.', 'to_string', '(', 'name', ')', ')', 'return', 'result']
Returns the names of all environment variables. :return: the names of the variables :rtype: list
['Returns', 'the', 'names', 'of', 'all', 'environment', 'variables', '.']
train
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L707-L718
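A usage sketch for `variable_names`, assuming a JVM started through python-weka-wrapper3's `jvm` helper and that `Environment()` and `add_variable` behave as in current releases:

```python
import weka.core.jvm as jvm
from weka.core.classes import Environment

jvm.start()
try:
    env = Environment()                      # wraps a weka.core.Environment object
    env.add_variable("dataset.dir", "/tmp/data")
    print(env.variable_names())              # e.g. ['dataset.dir', ...]
finally:
    jvm.stop()                               # always shut the JVM down
```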
1,282
Kozea/pygal
pygal/graph/dot.py
Dot.dot
def dot(self, serie, r_max): """Draw a dot line""" serie_node = self.svg.serie(serie) view_values = list(map(self.view, serie.points)) for i, value in safe_enumerate(serie.values): x, y = view_values[i] if self.logarithmic: log10min = log10(self._min) - 1 log10max = log10(self._max or 1) if value != 0: size = r_max * ((log10(abs(value)) - log10min) / (log10max - log10min)) else: size = 0 else: size = r_max * (abs(value) / (self._max or 1)) metadata = serie.metadata.get(i) dots = decorate( self.svg, self.svg.node(serie_node['plot'], class_="dots"), metadata ) alter( self.svg.node( dots, 'circle', cx=x, cy=y, r=size, class_='dot reactive tooltip-trigger' + (' negative' if value < 0 else '') ), metadata ) val = self._format(serie, i) self._tooltip_data( dots, val, x, y, 'centered', self._get_x_label(i) ) self._static_value(serie_node, val, x, y, metadata)
python
def dot(self, serie, r_max): """Draw a dot line""" serie_node = self.svg.serie(serie) view_values = list(map(self.view, serie.points)) for i, value in safe_enumerate(serie.values): x, y = view_values[i] if self.logarithmic: log10min = log10(self._min) - 1 log10max = log10(self._max or 1) if value != 0: size = r_max * ((log10(abs(value)) - log10min) / (log10max - log10min)) else: size = 0 else: size = r_max * (abs(value) / (self._max or 1)) metadata = serie.metadata.get(i) dots = decorate( self.svg, self.svg.node(serie_node['plot'], class_="dots"), metadata ) alter( self.svg.node( dots, 'circle', cx=x, cy=y, r=size, class_='dot reactive tooltip-trigger' + (' negative' if value < 0 else '') ), metadata ) val = self._format(serie, i) self._tooltip_data( dots, val, x, y, 'centered', self._get_x_label(i) ) self._static_value(serie_node, val, x, y, metadata)
['def', 'dot', '(', 'self', ',', 'serie', ',', 'r_max', ')', ':', 'serie_node', '=', 'self', '.', 'svg', '.', 'serie', '(', 'serie', ')', 'view_values', '=', 'list', '(', 'map', '(', 'self', '.', 'view', ',', 'serie', '.', 'points', ')', ')', 'for', 'i', ',', 'value', 'in', 'safe_enumerate', '(', 'serie', '.', 'values', ')', ':', 'x', ',', 'y', '=', 'view_values', '[', 'i', ']', 'if', 'self', '.', 'logarithmic', ':', 'log10min', '=', 'log10', '(', 'self', '.', '_min', ')', '-', '1', 'log10max', '=', 'log10', '(', 'self', '.', '_max', 'or', '1', ')', 'if', 'value', '!=', '0', ':', 'size', '=', 'r_max', '*', '(', '(', 'log10', '(', 'abs', '(', 'value', ')', ')', '-', 'log10min', ')', '/', '(', 'log10max', '-', 'log10min', ')', ')', 'else', ':', 'size', '=', '0', 'else', ':', 'size', '=', 'r_max', '*', '(', 'abs', '(', 'value', ')', '/', '(', 'self', '.', '_max', 'or', '1', ')', ')', 'metadata', '=', 'serie', '.', 'metadata', '.', 'get', '(', 'i', ')', 'dots', '=', 'decorate', '(', 'self', '.', 'svg', ',', 'self', '.', 'svg', '.', 'node', '(', 'serie_node', '[', "'plot'", ']', ',', 'class_', '=', '"dots"', ')', ',', 'metadata', ')', 'alter', '(', 'self', '.', 'svg', '.', 'node', '(', 'dots', ',', "'circle'", ',', 'cx', '=', 'x', ',', 'cy', '=', 'y', ',', 'r', '=', 'size', ',', 'class_', '=', "'dot reactive tooltip-trigger'", '+', '(', "' negative'", 'if', 'value', '<', '0', 'else', "''", ')', ')', ',', 'metadata', ')', 'val', '=', 'self', '.', '_format', '(', 'serie', ',', 'i', ')', 'self', '.', '_tooltip_data', '(', 'dots', ',', 'val', ',', 'x', ',', 'y', ',', "'centered'", ',', 'self', '.', '_get_x_label', '(', 'i', ')', ')', 'self', '.', '_static_value', '(', 'serie_node', ',', 'val', ',', 'x', ',', 'y', ',', 'metadata', ')']
Draw a dot line
['Draw', 'a', 'dot', 'line']
train
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/dot.py#L37-L77
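The radius computation inside `Dot.dot` can be isolated into a pure function; a sketch under the same assumptions (`vmin`/`vmax` stand in for the chart's `_min`/`_max`, `r_max` is the largest allowed radius):

```python
from math import log10

def dot_radius(value, vmin, vmax, r_max, logarithmic=False):
    """Mirror of the sizing logic: radius grows with |value|."""
    if logarithmic:
        log10min = log10(vmin) - 1   # one extra decade of headroom below the minimum
        log10max = log10(vmax or 1)
        if value == 0:
            return 0
        return r_max * (log10(abs(value)) - log10min) / (log10max - log10min)
    return r_max * (abs(value) / (vmax or 1))

print(dot_radius(50, 1, 100, 10, logarithmic=True))  # ~9.0
print(dot_radius(50, 1, 100, 10))                    # 5.0
```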
1,283
genialis/resolwe
resolwe/flow/models/data.py
Data.create_entity
def create_entity(self): """Create entity if `flow_collection` is defined in process. Following rules applies for adding `Data` object to `Entity`: * Only add `Data object` to `Entity` if process has defined `flow_collection` field * Add object to existing `Entity`, if all parents that are part of it (but not necessary all parents), are part of the same `Entity` * If parents belong to different `Entities` or do not belong to any `Entity`, create new `Entity` """ entity_type = self.process.entity_type # pylint: disable=no-member entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member entity_input = self.process.entity_input # pylint: disable=no-member if entity_type: data_filter = {} if entity_input: input_id = dict_dot(self.input, entity_input, default=lambda: None) if input_id is None: logger.warning("Skipping creation of entity due to missing input.") return if isinstance(input_id, int): data_filter['data__pk'] = input_id elif isinstance(input_id, list): data_filter['data__pk__in'] = input_id else: raise ValueError( "Cannot create entity due to invalid value of field {}.".format(entity_input) ) else: data_filter['data__in'] = self.parents.all() # pylint: disable=no-member entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct() entity_count = entity_query.count() if entity_count == 0: descriptor_schema = DescriptorSchema.objects.filter( slug=entity_descriptor_schema ).latest() entity = Entity.objects.create( contributor=self.contributor, descriptor_schema=descriptor_schema, type=entity_type, name=self.name, tags=self.tags, ) assign_contributor_permissions(entity) elif entity_count == 1: entity = entity_query.first() copy_permissions(entity, self) else: logger.info("Skipping creation of entity due to multiple entities found.") entity = None if entity: entity.data.add(self) # Inherit collections from entity. for collection in entity.collections.all(): collection.data.add(self)
python
def create_entity(self): """Create entity if `flow_collection` is defined in process. Following rules applies for adding `Data` object to `Entity`: * Only add `Data object` to `Entity` if process has defined `flow_collection` field * Add object to existing `Entity`, if all parents that are part of it (but not necessary all parents), are part of the same `Entity` * If parents belong to different `Entities` or do not belong to any `Entity`, create new `Entity` """ entity_type = self.process.entity_type # pylint: disable=no-member entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member entity_input = self.process.entity_input # pylint: disable=no-member if entity_type: data_filter = {} if entity_input: input_id = dict_dot(self.input, entity_input, default=lambda: None) if input_id is None: logger.warning("Skipping creation of entity due to missing input.") return if isinstance(input_id, int): data_filter['data__pk'] = input_id elif isinstance(input_id, list): data_filter['data__pk__in'] = input_id else: raise ValueError( "Cannot create entity due to invalid value of field {}.".format(entity_input) ) else: data_filter['data__in'] = self.parents.all() # pylint: disable=no-member entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct() entity_count = entity_query.count() if entity_count == 0: descriptor_schema = DescriptorSchema.objects.filter( slug=entity_descriptor_schema ).latest() entity = Entity.objects.create( contributor=self.contributor, descriptor_schema=descriptor_schema, type=entity_type, name=self.name, tags=self.tags, ) assign_contributor_permissions(entity) elif entity_count == 1: entity = entity_query.first() copy_permissions(entity, self) else: logger.info("Skipping creation of entity due to multiple entities found.") entity = None if entity: entity.data.add(self) # Inherit collections from entity. for collection in entity.collections.all(): collection.data.add(self)
['def', 'create_entity', '(', 'self', ')', ':', 'entity_type', '=', 'self', '.', 'process', '.', 'entity_type', '# pylint: disable=no-member', 'entity_descriptor_schema', '=', 'self', '.', 'process', '.', 'entity_descriptor_schema', '# pylint: disable=no-member', 'entity_input', '=', 'self', '.', 'process', '.', 'entity_input', '# pylint: disable=no-member', 'if', 'entity_type', ':', 'data_filter', '=', '{', '}', 'if', 'entity_input', ':', 'input_id', '=', 'dict_dot', '(', 'self', '.', 'input', ',', 'entity_input', ',', 'default', '=', 'lambda', ':', 'None', ')', 'if', 'input_id', 'is', 'None', ':', 'logger', '.', 'warning', '(', '"Skipping creation of entity due to missing input."', ')', 'return', 'if', 'isinstance', '(', 'input_id', ',', 'int', ')', ':', 'data_filter', '[', "'data__pk'", ']', '=', 'input_id', 'elif', 'isinstance', '(', 'input_id', ',', 'list', ')', ':', 'data_filter', '[', "'data__pk__in'", ']', '=', 'input_id', 'else', ':', 'raise', 'ValueError', '(', '"Cannot create entity due to invalid value of field {}."', '.', 'format', '(', 'entity_input', ')', ')', 'else', ':', 'data_filter', '[', "'data__in'", ']', '=', 'self', '.', 'parents', '.', 'all', '(', ')', '# pylint: disable=no-member', 'entity_query', '=', 'Entity', '.', 'objects', '.', 'filter', '(', 'type', '=', 'entity_type', ',', '*', '*', 'data_filter', ')', '.', 'distinct', '(', ')', 'entity_count', '=', 'entity_query', '.', 'count', '(', ')', 'if', 'entity_count', '==', '0', ':', 'descriptor_schema', '=', 'DescriptorSchema', '.', 'objects', '.', 'filter', '(', 'slug', '=', 'entity_descriptor_schema', ')', '.', 'latest', '(', ')', 'entity', '=', 'Entity', '.', 'objects', '.', 'create', '(', 'contributor', '=', 'self', '.', 'contributor', ',', 'descriptor_schema', '=', 'descriptor_schema', ',', 'type', '=', 'entity_type', ',', 'name', '=', 'self', '.', 'name', ',', 'tags', '=', 'self', '.', 'tags', ',', ')', 'assign_contributor_permissions', '(', 'entity', ')', 'elif', 'entity_count', '==', '1', ':', 'entity', '=', 'entity_query', '.', 'first', '(', ')', 'copy_permissions', '(', 'entity', ',', 'self', ')', 'else', ':', 'logger', '.', 'info', '(', '"Skipping creation of entity due to multiple entities found."', ')', 'entity', '=', 'None', 'if', 'entity', ':', 'entity', '.', 'data', '.', 'add', '(', 'self', ')', '# Inherit collections from entity.', 'for', 'collection', 'in', 'entity', '.', 'collections', '.', 'all', '(', ')', ':', 'collection', '.', 'data', '.', 'add', '(', 'self', ')']
Create entity if `flow_collection` is defined in process. The following rules apply for adding `Data` object to `Entity`: * Only add `Data object` to `Entity` if process has defined `flow_collection` field * Add object to existing `Entity`, if all parents that are part of it (but not necessarily all parents) are part of the same `Entity` * If parents belong to different `Entities` or do not belong to any `Entity`, create new `Entity`
['Create', 'entity', 'if', 'flow_collection', 'is', 'defined', 'in', 'process', '.']
train
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L338-L401
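The `entity_input` branch relies on dotted-path lookup into the process input; a minimal re-implementation sketch of that behaviour (the real `dict_dot` lives in resolwe's utilities, and this only mimics the `default`-callable convention used above):

```python
def dict_dot_sketch(data, path, default=None):
    """Resolve 'a.b.c' style paths into nested dicts; fall back to default()."""
    node = data
    for key in path.split('.'):
        try:
            node = node[key]
        except (KeyError, TypeError):
            return default() if callable(default) else default
    return node

inputs = {'reads': {'id': 42}}
print(dict_dot_sketch(inputs, 'reads.id', default=lambda: None))      # -> 42
print(dict_dot_sketch(inputs, 'missing.path', default=lambda: None))  # -> None
```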
1,284
jaywink/federation
federation/entities/diaspora/mappers.py
message_to_objects
def message_to_objects( message: str, sender: str, sender_key_fetcher: Callable[[str], str] = None, user: UserType = None, ) -> List: """Takes in a message extracted by a protocol and maps it to entities. :param message: XML payload :type message: str :param sender: Payload sender id :type sender: str :param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be fetched over network. The function should take sender handle as the only parameter. :param user: Optional receiving user object. If given, should have a `handle`. :returns: list of entities """ doc = etree.fromstring(message) if doc.tag in TAGS: return element_to_objects(doc, sender, sender_key_fetcher, user) return []
python
def message_to_objects( message: str, sender: str, sender_key_fetcher: Callable[[str], str] = None, user: UserType = None, ) -> List: """Takes in a message extracted by a protocol and maps it to entities. :param message: XML payload :type message: str :param sender: Payload sender id :type sender: str :param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be fetched over network. The function should take sender handle as the only parameter. :param user: Optional receiving user object. If given, should have a `handle`. :returns: list of entities """ doc = etree.fromstring(message) if doc.tag in TAGS: return element_to_objects(doc, sender, sender_key_fetcher, user) return []
['def', 'message_to_objects', '(', 'message', ':', 'str', ',', 'sender', ':', 'str', ',', 'sender_key_fetcher', ':', 'Callable', '[', '[', 'str', ']', ',', 'str', ']', '=', 'None', ',', 'user', ':', 'UserType', '=', 'None', ',', ')', '->', 'List', ':', 'doc', '=', 'etree', '.', 'fromstring', '(', 'message', ')', 'if', 'doc', '.', 'tag', 'in', 'TAGS', ':', 'return', 'element_to_objects', '(', 'doc', ',', 'sender', ',', 'sender_key_fetcher', ',', 'user', ')', 'return', '[', ']']
Takes in a message extracted by a protocol and maps it to entities. :param message: XML payload :type message: str :param sender: Payload sender id :type sender: str :param sender_key_fetcher: Function to fetch sender public key. If not given, key will always be fetched over network. The function should take sender handle as the only parameter. :param user: Optional receiving user object. If given, should have a `handle`. :returns: list of entities
['Takes', 'in', 'a', 'message', 'extracted', 'by', 'a', 'protocol', 'and', 'maps', 'it', 'to', 'entities', '.']
train
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/entities/diaspora/mappers.py#L138-L155
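A usage sketch for `message_to_objects`; the payload and the key store are illustrative stand-ins, and the key fetcher simply avoids a network round trip, as the docstring suggests:

```python
from federation.entities.diaspora.mappers import message_to_objects

payload = "<status_message>...</status_message>"  # XML extracted by the protocol layer
known_keys = {"alice@example.com": "-----BEGIN PUBLIC KEY-----..."}  # placeholder store

def fetch_key(handle):
    # Hypothetical local lookup standing in for fetching the key over the network.
    return known_keys[handle]

entities = message_to_objects(payload, sender="alice@example.com",
                              sender_key_fetcher=fetch_key)
```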
1,285
tensorflow/tensorboard
tensorboard/plugins/image/images_demo.py
get_image
def get_image(verbose=False): """Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.""" base_data = tf.constant(image_data(verbose=verbose)) base_image = tf.image.decode_image(base_data, channels=3) base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8) return parsed_image
python
def get_image(verbose=False): """Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.""" base_data = tf.constant(image_data(verbose=verbose)) base_image = tf.image.decode_image(base_data, channels=3) base_image.set_shape((IMAGE_HEIGHT, IMAGE_WIDTH, 3)) parsed_image = tf.Variable(base_image, name='image', dtype=tf.uint8) return parsed_image
['def', 'get_image', '(', 'verbose', '=', 'False', ')', ':', 'base_data', '=', 'tf', '.', 'constant', '(', 'image_data', '(', 'verbose', '=', 'verbose', ')', ')', 'base_image', '=', 'tf', '.', 'image', '.', 'decode_image', '(', 'base_data', ',', 'channels', '=', '3', ')', 'base_image', '.', 'set_shape', '(', '(', 'IMAGE_HEIGHT', ',', 'IMAGE_WIDTH', ',', '3', ')', ')', 'parsed_image', '=', 'tf', '.', 'Variable', '(', 'base_image', ',', 'name', '=', "'image'", ',', 'dtype', '=', 'tf', '.', 'uint8', ')', 'return', 'parsed_image']
Get the image as a TensorFlow variable. Returns: A `tf.Variable`, which must be initialized prior to use: invoke `sess.run(result.initializer)`.
['Get', 'the', 'image', 'as', 'a', 'TensorFlow', 'variable', '.']
train
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/image/images_demo.py#L99-L109
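Since the docstring requires explicit initialization, a minimal sketch in TensorFlow 1.x graph mode (which this demo targets):

```python
import tensorflow as tf

image = get_image(verbose=True)
with tf.Session() as sess:
    sess.run(image.initializer)   # must run before the variable is read
    pixels = sess.run(image)      # uint8 array of shape (IMAGE_HEIGHT, IMAGE_WIDTH, 3)
```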
1,286
softlayer/softlayer-python
SoftLayer/managers/vs.py
VSManager.list_instances
def list_instances(self, hourly=True, monthly=True, tags=None, cpus=None, memory=None, hostname=None, domain=None, local_disk=None, datacenter=None, nic_speed=None, public_ip=None, private_ip=None, **kwargs): """Retrieve a list of all virtual servers on the account. Example:: # Print out a list of hourly instances in the DAL05 data center. for vsi in mgr.list_instances(hourly=True, datacenter='dal05'): print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress'] # Using a custom object-mask. Will get ONLY what is specified object_mask = "mask[hostname,monitoringRobot[robotStatus]]" for vsi in mgr.list_instances(mask=object_mask,hourly=True): print vsi :param boolean hourly: include hourly instances :param boolean monthly: include monthly instances :param list tags: filter based on list of tags :param integer cpus: filter based on number of CPUS :param integer memory: filter based on amount of memory :param string hostname: filter based on hostname :param string domain: filter based on domain :param string local_disk: filter based on local_disk :param string datacenter: filter based on datacenter :param integer nic_speed: filter based on network speed (in MBPS) :param string public_ip: filter based on public ip address :param string private_ip: filter based on private ip address :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) :returns: Returns a list of dictionaries representing the matching virtual servers """ if 'mask' not in kwargs: items = [ 'id', 'globalIdentifier', 'hostname', 'domain', 'fullyQualifiedDomainName', 'primaryBackendIpAddress', 'primaryIpAddress', 'lastKnownPowerState.name', 'powerState', 'maxCpu', 'maxMemory', 'datacenter', 'activeTransaction.transactionStatus[friendlyName,name]', 'status', ] kwargs['mask'] = "mask[%s]" % ','.join(items) call = 'getVirtualGuests' if not all([hourly, monthly]): if hourly: call = 'getHourlyVirtualGuests' elif monthly: call = 'getMonthlyVirtualGuests' _filter = utils.NestedDict(kwargs.get('filter') or {}) if tags: _filter['virtualGuests']['tagReferences']['tag']['name'] = { 'operation': 'in', 'options': [{'name': 'data', 'value': tags}], } if cpus: _filter['virtualGuests']['maxCpu'] = utils.query_filter(cpus) if memory: _filter['virtualGuests']['maxMemory'] = utils.query_filter(memory) if hostname: _filter['virtualGuests']['hostname'] = utils.query_filter(hostname) if domain: _filter['virtualGuests']['domain'] = utils.query_filter(domain) if local_disk is not None: _filter['virtualGuests']['localDiskFlag'] = ( utils.query_filter(bool(local_disk))) if datacenter: _filter['virtualGuests']['datacenter']['name'] = ( utils.query_filter(datacenter)) if nic_speed: _filter['virtualGuests']['networkComponents']['maxSpeed'] = ( utils.query_filter(nic_speed)) if public_ip: _filter['virtualGuests']['primaryIpAddress'] = ( utils.query_filter(public_ip)) if private_ip: _filter['virtualGuests']['primaryBackendIpAddress'] = ( utils.query_filter(private_ip)) kwargs['filter'] = _filter.to_dict() kwargs['iter'] = True return self.client.call('Account', call, **kwargs)
python
def list_instances(self, hourly=True, monthly=True, tags=None, cpus=None, memory=None, hostname=None, domain=None, local_disk=None, datacenter=None, nic_speed=None, public_ip=None, private_ip=None, **kwargs): """Retrieve a list of all virtual servers on the account. Example:: # Print out a list of hourly instances in the DAL05 data center. for vsi in mgr.list_instances(hourly=True, datacenter='dal05'): print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress'] # Using a custom object-mask. Will get ONLY what is specified object_mask = "mask[hostname,monitoringRobot[robotStatus]]" for vsi in mgr.list_instances(mask=object_mask,hourly=True): print vsi :param boolean hourly: include hourly instances :param boolean monthly: include monthly instances :param list tags: filter based on list of tags :param integer cpus: filter based on number of CPUS :param integer memory: filter based on amount of memory :param string hostname: filter based on hostname :param string domain: filter based on domain :param string local_disk: filter based on local_disk :param string datacenter: filter based on datacenter :param integer nic_speed: filter based on network speed (in MBPS) :param string public_ip: filter based on public ip address :param string private_ip: filter based on private ip address :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) :returns: Returns a list of dictionaries representing the matching virtual servers """ if 'mask' not in kwargs: items = [ 'id', 'globalIdentifier', 'hostname', 'domain', 'fullyQualifiedDomainName', 'primaryBackendIpAddress', 'primaryIpAddress', 'lastKnownPowerState.name', 'powerState', 'maxCpu', 'maxMemory', 'datacenter', 'activeTransaction.transactionStatus[friendlyName,name]', 'status', ] kwargs['mask'] = "mask[%s]" % ','.join(items) call = 'getVirtualGuests' if not all([hourly, monthly]): if hourly: call = 'getHourlyVirtualGuests' elif monthly: call = 'getMonthlyVirtualGuests' _filter = utils.NestedDict(kwargs.get('filter') or {}) if tags: _filter['virtualGuests']['tagReferences']['tag']['name'] = { 'operation': 'in', 'options': [{'name': 'data', 'value': tags}], } if cpus: _filter['virtualGuests']['maxCpu'] = utils.query_filter(cpus) if memory: _filter['virtualGuests']['maxMemory'] = utils.query_filter(memory) if hostname: _filter['virtualGuests']['hostname'] = utils.query_filter(hostname) if domain: _filter['virtualGuests']['domain'] = utils.query_filter(domain) if local_disk is not None: _filter['virtualGuests']['localDiskFlag'] = ( utils.query_filter(bool(local_disk))) if datacenter: _filter['virtualGuests']['datacenter']['name'] = ( utils.query_filter(datacenter)) if nic_speed: _filter['virtualGuests']['networkComponents']['maxSpeed'] = ( utils.query_filter(nic_speed)) if public_ip: _filter['virtualGuests']['primaryIpAddress'] = ( utils.query_filter(public_ip)) if private_ip: _filter['virtualGuests']['primaryBackendIpAddress'] = ( utils.query_filter(private_ip)) kwargs['filter'] = _filter.to_dict() kwargs['iter'] = True return self.client.call('Account', call, **kwargs)
['def', 'list_instances', '(', 'self', ',', 'hourly', '=', 'True', ',', 'monthly', '=', 'True', ',', 'tags', '=', 'None', ',', 'cpus', '=', 'None', ',', 'memory', '=', 'None', ',', 'hostname', '=', 'None', ',', 'domain', '=', 'None', ',', 'local_disk', '=', 'None', ',', 'datacenter', '=', 'None', ',', 'nic_speed', '=', 'None', ',', 'public_ip', '=', 'None', ',', 'private_ip', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', "'mask'", 'not', 'in', 'kwargs', ':', 'items', '=', '[', "'id'", ',', "'globalIdentifier'", ',', "'hostname'", ',', "'domain'", ',', "'fullyQualifiedDomainName'", ',', "'primaryBackendIpAddress'", ',', "'primaryIpAddress'", ',', "'lastKnownPowerState.name'", ',', "'powerState'", ',', "'maxCpu'", ',', "'maxMemory'", ',', "'datacenter'", ',', "'activeTransaction.transactionStatus[friendlyName,name]'", ',', "'status'", ',', ']', 'kwargs', '[', "'mask'", ']', '=', '"mask[%s]"', '%', "','", '.', 'join', '(', 'items', ')', 'call', '=', "'getVirtualGuests'", 'if', 'not', 'all', '(', '[', 'hourly', ',', 'monthly', ']', ')', ':', 'if', 'hourly', ':', 'call', '=', "'getHourlyVirtualGuests'", 'elif', 'monthly', ':', 'call', '=', "'getMonthlyVirtualGuests'", '_filter', '=', 'utils', '.', 'NestedDict', '(', 'kwargs', '.', 'get', '(', "'filter'", ')', 'or', '{', '}', ')', 'if', 'tags', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'tagReferences'", ']', '[', "'tag'", ']', '[', "'name'", ']', '=', '{', "'operation'", ':', "'in'", ',', "'options'", ':', '[', '{', "'name'", ':', "'data'", ',', "'value'", ':', 'tags', '}', ']', ',', '}', 'if', 'cpus', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'maxCpu'", ']', '=', 'utils', '.', 'query_filter', '(', 'cpus', ')', 'if', 'memory', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'maxMemory'", ']', '=', 'utils', '.', 'query_filter', '(', 'memory', ')', 'if', 'hostname', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'hostname'", ']', '=', 'utils', '.', 'query_filter', '(', 'hostname', ')', 'if', 'domain', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'domain'", ']', '=', 'utils', '.', 'query_filter', '(', 'domain', ')', 'if', 'local_disk', 'is', 'not', 'None', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'localDiskFlag'", ']', '=', '(', 'utils', '.', 'query_filter', '(', 'bool', '(', 'local_disk', ')', ')', ')', 'if', 'datacenter', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'datacenter'", ']', '[', "'name'", ']', '=', '(', 'utils', '.', 'query_filter', '(', 'datacenter', ')', ')', 'if', 'nic_speed', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'networkComponents'", ']', '[', "'maxSpeed'", ']', '=', '(', 'utils', '.', 'query_filter', '(', 'nic_speed', ')', ')', 'if', 'public_ip', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'primaryIpAddress'", ']', '=', '(', 'utils', '.', 'query_filter', '(', 'public_ip', ')', ')', 'if', 'private_ip', ':', '_filter', '[', "'virtualGuests'", ']', '[', "'primaryBackendIpAddress'", ']', '=', '(', 'utils', '.', 'query_filter', '(', 'private_ip', ')', ')', 'kwargs', '[', "'filter'", ']', '=', '_filter', '.', 'to_dict', '(', ')', 'kwargs', '[', "'iter'", ']', '=', 'True', 'return', 'self', '.', 'client', '.', 'call', '(', "'Account'", ',', 'call', ',', '*', '*', 'kwargs', ')']
Retrieve a list of all virtual servers on the account. Example:: # Print out a list of hourly instances in the DAL05 data center. for vsi in mgr.list_instances(hourly=True, datacenter='dal05'): print vsi['fullyQualifiedDomainName'], vsi['primaryIpAddress'] # Using a custom object-mask. Will get ONLY what is specified object_mask = "mask[hostname,monitoringRobot[robotStatus]]" for vsi in mgr.list_instances(mask=object_mask,hourly=True): print vsi :param boolean hourly: include hourly instances :param boolean monthly: include monthly instances :param list tags: filter based on list of tags :param integer cpus: filter based on number of CPUs :param integer memory: filter based on amount of memory :param string hostname: filter based on hostname :param string domain: filter based on domain :param string local_disk: filter based on local_disk :param string datacenter: filter based on datacenter :param integer nic_speed: filter based on network speed (in Mbps) :param string public_ip: filter based on public IP address :param string private_ip: filter based on private IP address :param dict \\*\\*kwargs: response-level options (mask, limit, etc.) :returns: Returns a list of dictionaries representing the matching virtual servers
['Retrieve', 'a', 'list', 'of', 'all', 'virtual', 'servers', 'on', 'the', 'account', '.']
train
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/vs.py#L61-L162
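A sketch tying the docstring examples together, assuming credentials are available in the environment:

```python
import SoftLayer

client = SoftLayer.create_client_from_env()
mgr = SoftLayer.VSManager(client)

# Hourly guests in dal05 tagged "production"; extra kwargs such as `limit`
# pass straight through as response-level options.
for vsi in mgr.list_instances(hourly=True, monthly=False,
                              datacenter='dal05', tags=['production']):
    print(vsi['fullyQualifiedDomainName'])
```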
1,287
mottosso/be
be/cli.py
tab
def tab(topics, complete): """Utility sub-command for tabcompletion This command is meant to be called by a tab completion function and is given a the currently entered topics, along with a boolean indicating whether or not the last entered argument is complete. """ # Discard `be tab` topics = list(topics)[2:] # When given an incomplete argument, # the argument is *sometimes* returned twice (?) # .. note:: Seen in Git Bash on Windows # $ be in giant [TAB] # -> ['giant'] # $ be in gi[TAB] # -> ['gi', 'gi'] if len(topics) > 1 and topics[-1] == topics[-2]: topics.pop() # Suggest projects if len(topics) == 0: projects = lib.list_projects(root=_extern.cwd()) sys.stdout.write(" ".join(projects)) elif len(topics) == 1: project = topics[0] projects = lib.list_projects(root=_extern.cwd()) # Complete project if not complete: projects = [i for i in projects if i.startswith(project)] sys.stdout.write(" ".join(projects)) else: # Suggest items from inventory inventory = _extern.load_inventory(project) inventory = lib.list_inventory(inventory) items = [i for i, b in inventory] sys.stdout.write(" ".join(items)) else: project, item = topics[:2] # Complete inventory item if len(topics) == 2 and not complete: inventory = _extern.load_inventory(project) inventory = lib.list_inventory(inventory) items = [i for i, b in inventory] items = [i for i in items if i.startswith(item)] sys.stdout.write(" ".join(items)) # Suggest items from template else: try: be = _extern.load_be(project) templates = _extern.load_templates(project) inventory = _extern.load_inventory(project) item = topics[-1] items = lib.list_template(root=_extern.cwd(), topics=topics, templates=templates, inventory=inventory, be=be) if not complete: items = lib.list_template(root=_extern.cwd(), topics=topics[:-1], templates=templates, inventory=inventory, be=be) items = [i for i in items if i.startswith(item)] sys.stdout.write(" ".join(items) + " ") else: sys.stdout.write(" ".join(items) + " ") except IndexError: sys.exit(lib.NORMAL)
python
def tab(topics, complete): """Utility sub-command for tabcompletion This command is meant to be called by a tab completion function and is given a the currently entered topics, along with a boolean indicating whether or not the last entered argument is complete. """ # Discard `be tab` topics = list(topics)[2:] # When given an incomplete argument, # the argument is *sometimes* returned twice (?) # .. note:: Seen in Git Bash on Windows # $ be in giant [TAB] # -> ['giant'] # $ be in gi[TAB] # -> ['gi', 'gi'] if len(topics) > 1 and topics[-1] == topics[-2]: topics.pop() # Suggest projects if len(topics) == 0: projects = lib.list_projects(root=_extern.cwd()) sys.stdout.write(" ".join(projects)) elif len(topics) == 1: project = topics[0] projects = lib.list_projects(root=_extern.cwd()) # Complete project if not complete: projects = [i for i in projects if i.startswith(project)] sys.stdout.write(" ".join(projects)) else: # Suggest items from inventory inventory = _extern.load_inventory(project) inventory = lib.list_inventory(inventory) items = [i for i, b in inventory] sys.stdout.write(" ".join(items)) else: project, item = topics[:2] # Complete inventory item if len(topics) == 2 and not complete: inventory = _extern.load_inventory(project) inventory = lib.list_inventory(inventory) items = [i for i, b in inventory] items = [i for i in items if i.startswith(item)] sys.stdout.write(" ".join(items)) # Suggest items from template else: try: be = _extern.load_be(project) templates = _extern.load_templates(project) inventory = _extern.load_inventory(project) item = topics[-1] items = lib.list_template(root=_extern.cwd(), topics=topics, templates=templates, inventory=inventory, be=be) if not complete: items = lib.list_template(root=_extern.cwd(), topics=topics[:-1], templates=templates, inventory=inventory, be=be) items = [i for i in items if i.startswith(item)] sys.stdout.write(" ".join(items) + " ") else: sys.stdout.write(" ".join(items) + " ") except IndexError: sys.exit(lib.NORMAL)
['def', 'tab', '(', 'topics', ',', 'complete', ')', ':', '# Discard `be tab`', 'topics', '=', 'list', '(', 'topics', ')', '[', '2', ':', ']', '# When given an incomplete argument,', '# the argument is *sometimes* returned twice (?)', '# .. note:: Seen in Git Bash on Windows', '# $ be in giant [TAB]', "# -> ['giant']", '# $ be in gi[TAB]', "# -> ['gi', 'gi']", 'if', 'len', '(', 'topics', ')', '>', '1', 'and', 'topics', '[', '-', '1', ']', '==', 'topics', '[', '-', '2', ']', ':', 'topics', '.', 'pop', '(', ')', '# Suggest projects', 'if', 'len', '(', 'topics', ')', '==', '0', ':', 'projects', '=', 'lib', '.', 'list_projects', '(', 'root', '=', '_extern', '.', 'cwd', '(', ')', ')', 'sys', '.', 'stdout', '.', 'write', '(', '" "', '.', 'join', '(', 'projects', ')', ')', 'elif', 'len', '(', 'topics', ')', '==', '1', ':', 'project', '=', 'topics', '[', '0', ']', 'projects', '=', 'lib', '.', 'list_projects', '(', 'root', '=', '_extern', '.', 'cwd', '(', ')', ')', '# Complete project', 'if', 'not', 'complete', ':', 'projects', '=', '[', 'i', 'for', 'i', 'in', 'projects', 'if', 'i', '.', 'startswith', '(', 'project', ')', ']', 'sys', '.', 'stdout', '.', 'write', '(', '" "', '.', 'join', '(', 'projects', ')', ')', 'else', ':', '# Suggest items from inventory', 'inventory', '=', '_extern', '.', 'load_inventory', '(', 'project', ')', 'inventory', '=', 'lib', '.', 'list_inventory', '(', 'inventory', ')', 'items', '=', '[', 'i', 'for', 'i', ',', 'b', 'in', 'inventory', ']', 'sys', '.', 'stdout', '.', 'write', '(', '" "', '.', 'join', '(', 'items', ')', ')', 'else', ':', 'project', ',', 'item', '=', 'topics', '[', ':', '2', ']', '# Complete inventory item', 'if', 'len', '(', 'topics', ')', '==', '2', 'and', 'not', 'complete', ':', 'inventory', '=', '_extern', '.', 'load_inventory', '(', 'project', ')', 'inventory', '=', 'lib', '.', 'list_inventory', '(', 'inventory', ')', 'items', '=', '[', 'i', 'for', 'i', ',', 'b', 'in', 'inventory', ']', 'items', '=', '[', 'i', 'for', 'i', 'in', 'items', 'if', 'i', '.', 'startswith', '(', 'item', ')', ']', 'sys', '.', 'stdout', '.', 'write', '(', '" "', '.', 'join', '(', 'items', ')', ')', '# Suggest items from template', 'else', ':', 'try', ':', 'be', '=', '_extern', '.', 'load_be', '(', 'project', ')', 'templates', '=', '_extern', '.', 'load_templates', '(', 'project', ')', 'inventory', '=', '_extern', '.', 'load_inventory', '(', 'project', ')', 'item', '=', 'topics', '[', '-', '1', ']', 'items', '=', 'lib', '.', 'list_template', '(', 'root', '=', '_extern', '.', 'cwd', '(', ')', ',', 'topics', '=', 'topics', ',', 'templates', '=', 'templates', ',', 'inventory', '=', 'inventory', ',', 'be', '=', 'be', ')', 'if', 'not', 'complete', ':', 'items', '=', 'lib', '.', 'list_template', '(', 'root', '=', '_extern', '.', 'cwd', '(', ')', ',', 'topics', '=', 'topics', '[', ':', '-', '1', ']', ',', 'templates', '=', 'templates', ',', 'inventory', '=', 'inventory', ',', 'be', '=', 'be', ')', 'items', '=', '[', 'i', 'for', 'i', 'in', 'items', 'if', 'i', '.', 'startswith', '(', 'item', ')', ']', 'sys', '.', 'stdout', '.', 'write', '(', '" "', '.', 'join', '(', 'items', ')', '+', '" "', ')', 'else', ':', 'sys', '.', 'stdout', '.', 'write', '(', '" "', '.', 'join', '(', 'items', ')', '+', '" "', ')', 'except', 'IndexError', ':', 'sys', '.', 'exit', '(', 'lib', '.', 'NORMAL', ')']
Utility sub-command for tab completion This command is meant to be called by a tab completion function and is given the currently entered topics, along with a boolean indicating whether or not the last entered argument is complete.
['Utility', 'sub', '-', 'command', 'for', 'tab', 'completion']
train
https://github.com/mottosso/be/blob/0f3d4f3597c71223f616d78c6d9b2c8dffcd8a71/be/cli.py#L433-L512
1,288
neon-jungle/wagtailnews
wagtailnews/permissions.py
user_can_edit_news
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
python
def user_can_edit_news(user): """ Check if the user has permission to edit any of the registered NewsItem types. """ newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES] if user.is_active and user.is_superuser: # admin can edit news iff any news types exist return bool(newsitem_models) for NewsItem in newsitem_models: for perm in format_perms(NewsItem, ['add', 'change', 'delete']): if user.has_perm(perm): return True return False
['def', 'user_can_edit_news', '(', 'user', ')', ':', 'newsitem_models', '=', '[', 'model', '.', 'get_newsitem_model', '(', ')', 'for', 'model', 'in', 'NEWSINDEX_MODEL_CLASSES', ']', 'if', 'user', '.', 'is_active', 'and', 'user', '.', 'is_superuser', ':', '# admin can edit news iff any news types exist', 'return', 'bool', '(', 'newsitem_models', ')', 'for', 'NewsItem', 'in', 'newsitem_models', ':', 'for', 'perm', 'in', 'format_perms', '(', 'NewsItem', ',', '[', "'add'", ',', "'change'", ',', "'delete'", ']', ')', ':', 'if', 'user', '.', 'has_perm', '(', 'perm', ')', ':', 'return', 'True', 'return', 'False']
Check if the user has permission to edit any of the registered NewsItem types.
['Check', 'if', 'the', 'user', 'has', 'permission', 'to', 'edit', 'any', 'of', 'the', 'registered', 'NewsItem', 'types', '.']
train
https://github.com/neon-jungle/wagtailnews/blob/4cdec7013cca276dcfc658d3c986444ba6a42a84/wagtailnews/permissions.py#L21-L38
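A usage sketch, assuming a configured Django project; `format_perms` presumably expands to the standard Django codenames (e.g. `news.add_newsitem`, `news.change_newsitem`, `news.delete_newsitem` for a `NewsItem` model in an app labelled `news`):

```python
from django.contrib.auth import get_user_model

user = get_user_model().objects.get(username="editor")  # hypothetical user
if user_can_edit_news(user):
    print("expose the news section of the admin")
```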
1,289
teepark/greenhouse
greenhouse/io/ssl.py
SSLSocket.do_handshake
def do_handshake(self, timeout): 'perform an SSL/TLS handshake' tout = _timeout(timeout) if not self._blocking: return self._sslobj.do_handshake() while 1: try: return self._sslobj.do_handshake() except ssl.SSLError as exc: if exc.args[0] == ssl.SSL_ERROR_WANT_READ: self._wait_event(tout.now) continue elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE: self._wait_event(tout.now, write=True) continue raise self._wait_event(timeout) self._sslobj.do_handshake()
python
def do_handshake(self, timeout): 'perform an SSL/TLS handshake' tout = _timeout(timeout) if not self._blocking: return self._sslobj.do_handshake() while 1: try: return self._sslobj.do_handshake() except ssl.SSLError as exc: if exc.args[0] == ssl.SSL_ERROR_WANT_READ: self._wait_event(tout.now) continue elif exc.args[0] == ssl.SSL_ERROR_WANT_WRITE: self._wait_event(tout.now, write=True) continue raise self._wait_event(timeout) self._sslobj.do_handshake()
['def', 'do_handshake', '(', 'self', ',', 'timeout', ')', ':', 'tout', '=', '_timeout', '(', 'timeout', ')', 'if', 'not', 'self', '.', '_blocking', ':', 'return', 'self', '.', '_sslobj', '.', 'do_handshake', '(', ')', 'while', '1', ':', 'try', ':', 'return', 'self', '.', '_sslobj', '.', 'do_handshake', '(', ')', 'except', 'ssl', '.', 'SSLError', 'as', 'exc', ':', 'if', 'exc', '.', 'args', '[', '0', ']', '==', 'ssl', '.', 'SSL_ERROR_WANT_READ', ':', 'self', '.', '_wait_event', '(', 'tout', '.', 'now', ')', 'continue', 'elif', 'exc', '.', 'args', '[', '0', ']', '==', 'ssl', '.', 'SSL_ERROR_WANT_WRITE', ':', 'self', '.', '_wait_event', '(', 'tout', '.', 'now', ',', 'write', '=', 'True', ')', 'continue', 'raise', 'self', '.', '_wait_event', '(', 'timeout', ')', 'self', '.', '_sslobj', '.', 'do_handshake', '(', ')']
perform an SSL/TLS handshake
['perform', 'an', 'SSL', '/', 'TLS', 'handshake']
train
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/io/ssl.py#L242-L261
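The same retry pattern in modern Python 3, where the want-read and want-write cases are distinct exception types; a standalone sketch, not part of greenhouse:

```python
import select
import ssl

def blocking_handshake(ssl_sock, timeout=None):
    """Retry an SSL handshake on a non-blocking socket until it completes."""
    while True:
        try:
            return ssl_sock.do_handshake()
        except ssl.SSLWantReadError:
            select.select([ssl_sock], [], [], timeout)   # wait until readable
        except ssl.SSLWantWriteError:
            select.select([], [ssl_sock], [], timeout)   # wait until writable
```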
1,290
alfred82santa/dirty-models
dirty_models/models.py
recover_hashmap_model_from_data
def recover_hashmap_model_from_data(model_class, original_data, modified_data, deleted_data, field_type): """ Function to reconstruct a model from DirtyModel basic information: original data, the modified and deleted fields. Necessary for pickling an object """ model = model_class(field_type=field_type[0](**field_type[1])) return set_model_internal_data(model, original_data, modified_data, deleted_data)
python
def recover_hashmap_model_from_data(model_class, original_data, modified_data, deleted_data, field_type): """ Function to reconstruct a model from DirtyModel basic information: original data, the modified and deleted fields. Necessary for pickling an object """ model = model_class(field_type=field_type[0](**field_type[1])) return set_model_internal_data(model, original_data, modified_data, deleted_data)
['def', 'recover_hashmap_model_from_data', '(', 'model_class', ',', 'original_data', ',', 'modified_data', ',', 'deleted_data', ',', 'field_type', ')', ':', 'model', '=', 'model_class', '(', 'field_type', '=', 'field_type', '[', '0', ']', '(', '*', '*', 'field_type', '[', '1', ']', ')', ')', 'return', 'set_model_internal_data', '(', 'model', ',', 'original_data', ',', 'modified_data', ',', 'deleted_data', ')']
Function to reconstruct a model from DirtyModel basic information: original data, the modified and deleted fields. Necessary for pickling an object
['Function', 'to', 'reconstruct', 'a', 'model', 'from', 'DirtyModel', 'basic', 'information', ':', 'original', 'data', 'the', 'modified', 'and', 'deleted', 'fields', '.', 'Necessary', 'for', 'pickling', 'an', 'object']
train
https://github.com/alfred82santa/dirty-models/blob/354becdb751b21f673515eae928c256c7e923c50/dirty_models/models.py#L853-L860
1,291
kislyuk/aegea
aegea/buckets.py
ls
def ls(args): """ List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents. """ table = [] for bucket in filter_collection(resources.s3.buckets, args): bucket.LocationConstraint = clients.s3.get_bucket_location(Bucket=bucket.name)["LocationConstraint"] cloudwatch = resources.cloudwatch bucket_region = bucket.LocationConstraint or "us-east-1" if bucket_region != cloudwatch.meta.client.meta.region_name: cloudwatch = boto3.Session(region_name=bucket_region).resource("cloudwatch") data = get_cloudwatch_metric_stats("AWS/S3", "NumberOfObjects", start_time=datetime.utcnow() - timedelta(days=2), end_time=datetime.utcnow(), period=3600, BucketName=bucket.name, StorageType="AllStorageTypes", resource=cloudwatch) bucket.NumberOfObjects = int(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None data = get_cloudwatch_metric_stats("AWS/S3", "BucketSizeBytes", start_time=datetime.utcnow() - timedelta(days=2), end_time=datetime.utcnow(), period=3600, BucketName=bucket.name, StorageType="StandardStorage", resource=cloudwatch) bucket.BucketSizeBytes = format_number(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None table.append(bucket) page_output(tabulate(table, args))
python
def ls(args): """ List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents. """ table = [] for bucket in filter_collection(resources.s3.buckets, args): bucket.LocationConstraint = clients.s3.get_bucket_location(Bucket=bucket.name)["LocationConstraint"] cloudwatch = resources.cloudwatch bucket_region = bucket.LocationConstraint or "us-east-1" if bucket_region != cloudwatch.meta.client.meta.region_name: cloudwatch = boto3.Session(region_name=bucket_region).resource("cloudwatch") data = get_cloudwatch_metric_stats("AWS/S3", "NumberOfObjects", start_time=datetime.utcnow() - timedelta(days=2), end_time=datetime.utcnow(), period=3600, BucketName=bucket.name, StorageType="AllStorageTypes", resource=cloudwatch) bucket.NumberOfObjects = int(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None data = get_cloudwatch_metric_stats("AWS/S3", "BucketSizeBytes", start_time=datetime.utcnow() - timedelta(days=2), end_time=datetime.utcnow(), period=3600, BucketName=bucket.name, StorageType="StandardStorage", resource=cloudwatch) bucket.BucketSizeBytes = format_number(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None table.append(bucket) page_output(tabulate(table, args))
['def', 'ls', '(', 'args', ')', ':', 'table', '=', '[', ']', 'for', 'bucket', 'in', 'filter_collection', '(', 'resources', '.', 's3', '.', 'buckets', ',', 'args', ')', ':', 'bucket', '.', 'LocationConstraint', '=', 'clients', '.', 's3', '.', 'get_bucket_location', '(', 'Bucket', '=', 'bucket', '.', 'name', ')', '[', '"LocationConstraint"', ']', 'cloudwatch', '=', 'resources', '.', 'cloudwatch', 'bucket_region', '=', 'bucket', '.', 'LocationConstraint', 'or', '"us-east-1"', 'if', 'bucket_region', '!=', 'cloudwatch', '.', 'meta', '.', 'client', '.', 'meta', '.', 'region_name', ':', 'cloudwatch', '=', 'boto3', '.', 'Session', '(', 'region_name', '=', 'bucket_region', ')', '.', 'resource', '(', '"cloudwatch"', ')', 'data', '=', 'get_cloudwatch_metric_stats', '(', '"AWS/S3"', ',', '"NumberOfObjects"', ',', 'start_time', '=', 'datetime', '.', 'utcnow', '(', ')', '-', 'timedelta', '(', 'days', '=', '2', ')', ',', 'end_time', '=', 'datetime', '.', 'utcnow', '(', ')', ',', 'period', '=', '3600', ',', 'BucketName', '=', 'bucket', '.', 'name', ',', 'StorageType', '=', '"AllStorageTypes"', ',', 'resource', '=', 'cloudwatch', ')', 'bucket', '.', 'NumberOfObjects', '=', 'int', '(', 'data', '[', '"Datapoints"', ']', '[', '-', '1', ']', '[', '"Average"', ']', ')', 'if', 'data', '[', '"Datapoints"', ']', 'else', 'None', 'data', '=', 'get_cloudwatch_metric_stats', '(', '"AWS/S3"', ',', '"BucketSizeBytes"', ',', 'start_time', '=', 'datetime', '.', 'utcnow', '(', ')', '-', 'timedelta', '(', 'days', '=', '2', ')', ',', 'end_time', '=', 'datetime', '.', 'utcnow', '(', ')', ',', 'period', '=', '3600', ',', 'BucketName', '=', 'bucket', '.', 'name', ',', 'StorageType', '=', '"StandardStorage"', ',', 'resource', '=', 'cloudwatch', ')', 'bucket', '.', 'BucketSizeBytes', '=', 'format_number', '(', 'data', '[', '"Datapoints"', ']', '[', '-', '1', ']', '[', '"Average"', ']', ')', 'if', 'data', '[', '"Datapoints"', ']', 'else', 'None', 'table', '.', 'append', '(', 'bucket', ')', 'page_output', '(', 'tabulate', '(', 'table', ',', 'args', ')', ')']
List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents.
['List', 'S3', 'buckets', '.', 'See', 'also', 'aws', 's3', 'ls', '.', 'Use', 'aws', 's3', 'ls', 'NAME', 'to', 'list', 'bucket', 'contents', '.']
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/buckets.py#L21-L43
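The CloudWatch query behind those columns can be reproduced with plain boto3; a sketch assuming default credentials and a hypothetical bucket name (S3 storage metrics are published roughly daily, hence the two-day window):

```python
from datetime import datetime, timedelta
import boto3

cw = boto3.client("cloudwatch", region_name="us-east-1")
resp = cw.get_metric_statistics(
    Namespace="AWS/S3", MetricName="BucketSizeBytes",
    Dimensions=[{"Name": "BucketName", "Value": "my-bucket"},
                {"Name": "StorageType", "Value": "StandardStorage"}],
    StartTime=datetime.utcnow() - timedelta(days=2),
    EndTime=datetime.utcnow(), Period=3600, Statistics=["Average"],
)
datapoints = resp["Datapoints"]
size_bytes = datapoints[-1]["Average"] if datapoints else None
```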
1,292
senaite/senaite.core
bika/lims/content/abstractanalysis.py
AbstractAnalysis.getAllowedMethods
def getAllowedMethods(self): """Returns the allowed methods for this analysis, either if the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit View. :return: A list with the methods allowed for this analysis :rtype: list of Methods """ service = self.getAnalysisService() if not service: return [] methods = [] if self.getManualEntryOfResults(): methods = service.getMethods() if self.getInstrumentEntryOfResults(): for instrument in service.getInstruments(): methods.extend(instrument.getMethods()) return list(set(methods))
python
def getAllowedMethods(self): """Returns the allowed methods for this analysis, either if the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit View. :return: A list with the methods allowed for this analysis :rtype: list of Methods """ service = self.getAnalysisService() if not service: return [] methods = [] if self.getManualEntryOfResults(): methods = service.getMethods() if self.getInstrumentEntryOfResults(): for instrument in service.getInstruments(): methods.extend(instrument.getMethods()) return list(set(methods))
['def', 'getAllowedMethods', '(', 'self', ')', ':', 'service', '=', 'self', '.', 'getAnalysisService', '(', ')', 'if', 'not', 'service', ':', 'return', '[', ']', 'methods', '=', '[', ']', 'if', 'self', '.', 'getManualEntryOfResults', '(', ')', ':', 'methods', '=', 'service', '.', 'getMethods', '(', ')', 'if', 'self', '.', 'getInstrumentEntryOfResults', '(', ')', ':', 'for', 'instrument', 'in', 'service', '.', 'getInstruments', '(', ')', ':', 'methods', '.', 'extend', '(', 'instrument', '.', 'getMethods', '(', ')', ')', 'return', 'list', '(', 'set', '(', 'methods', ')', ')']
Returns the allowed methods for this analysis, whether the method was assigned directly (by using "Allows manual entry of results") or indirectly via Instrument ("Allows instrument entry of results") in Analysis Service Edit View. :return: A list with the methods allowed for this analysis :rtype: list of Methods
['Returns', 'the', 'allowed', 'methods', 'for', 'this', 'analysis', 'whether', 'the', 'method', 'was', 'assigned', 'directly', '(', 'by', 'using', 'Allows', 'manual', 'entry', 'of', 'results', ')', 'or', 'indirectly', 'via', 'Instrument', '(', 'Allows', 'instrument', 'entry', 'of', 'results', ')', 'in', 'Analysis', 'Service', 'Edit', 'View', '.', ':', 'return', ':', 'A', 'list', 'with', 'the', 'methods', 'allowed', 'for', 'this', 'analysis', ':', 'rtype', ':', 'list', 'of', 'Methods']
train
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractanalysis.py#L717-L736
1,293
pywbem/pywbem
pywbem/_listener.py
ListenerRequestHandler.log
def log(self, format_, args, level=logging.INFO): """ This function is called for anything that needs to get logged. It logs to the logger of this listener. It is not defined in the standard handler class; our version has an additional `level` argument that allows to control the logging level in the standard Python logging support. Another difference is that the variable arguments are passed in as a tuple. """ self.server.listener.logger.log(level, format_, *args)
python
def log(self, format_, args, level=logging.INFO): """ This function is called for anything that needs to get logged. It logs to the logger of this listener. It is not defined in the standard handler class; our version has an additional `level` argument that allows to control the logging level in the standard Python logging support. Another difference is that the variable arguments are passed in as a tuple. """ self.server.listener.logger.log(level, format_, *args)
['def', 'log', '(', 'self', ',', 'format_', ',', 'args', ',', 'level', '=', 'logging', '.', 'INFO', ')', ':', 'self', '.', 'server', '.', 'listener', '.', 'logger', '.', 'log', '(', 'level', ',', 'format_', ',', '*', 'args', ')']
This function is called for anything that needs to get logged. It logs to the logger of this listener. It is not defined in the standard handler class; our version has an additional `level` argument that allows to control the logging level in the standard Python logging support. Another difference is that the variable arguments are passed in as a tuple.
['This', 'function', 'is', 'called', 'for', 'anything', 'that', 'needs', 'to', 'get', 'logged', '.', 'It', 'logs', 'to', 'the', 'logger', 'of', 'this', 'listener', '.']
train
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/_listener.py#L547-L559
1,294
Skype4Py/Skype4Py
Skype4Py/skype.py
Skype.PlaceCall
def PlaceCall(self, *Targets): """Places a call to a single user or creates a conference call. :Parameters: Targets : str One or more call targets. If multiple targets are specified, a conference call is created. The call target can be a Skypename, phone number, or speed dial code. :return: A call object. :rtype: `call.Call` """ calls = self.ActiveCalls reply = self._DoCommand('CALL %s' % ', '.join(Targets)) # Skype for Windows returns the call status which gives us the call Id; if reply.startswith('CALL '): return Call(self, chop(reply, 2)[1]) # On linux we get 'OK' as reply so we search for the new call on # list of active calls. for c in self.ActiveCalls: if c not in calls: return c raise SkypeError(0, 'Placing call failed')
python
def PlaceCall(self, *Targets): """Places a call to a single user or creates a conference call. :Parameters: Targets : str One or more call targets. If multiple targets are specified, a conference call is created. The call target can be a Skypename, phone number, or speed dial code. :return: A call object. :rtype: `call.Call` """ calls = self.ActiveCalls reply = self._DoCommand('CALL %s' % ', '.join(Targets)) # Skype for Windows returns the call status which gives us the call Id; if reply.startswith('CALL '): return Call(self, chop(reply, 2)[1]) # On linux we get 'OK' as reply so we search for the new call on # list of active calls. for c in self.ActiveCalls: if c not in calls: return c raise SkypeError(0, 'Placing call failed')
['def', 'PlaceCall', '(', 'self', ',', '*', 'Targets', ')', ':', 'calls', '=', 'self', '.', 'ActiveCalls', 'reply', '=', 'self', '.', '_DoCommand', '(', "'CALL %s'", '%', "', '", '.', 'join', '(', 'Targets', ')', ')', '# Skype for Windows returns the call status which gives us the call Id;', 'if', 'reply', '.', 'startswith', '(', "'CALL '", ')', ':', 'return', 'Call', '(', 'self', ',', 'chop', '(', 'reply', ',', '2', ')', '[', '1', ']', ')', "# On linux we get 'OK' as reply so we search for the new call on", '# list of active calls.', 'for', 'c', 'in', 'self', '.', 'ActiveCalls', ':', 'if', 'c', 'not', 'in', 'calls', ':', 'return', 'c', 'raise', 'SkypeError', '(', '0', ',', "'Placing call failed'", ')']
Places a call to a single user or creates a conference call. :Parameters: Targets : str One or more call targets. If multiple targets are specified, a conference call is created. The call target can be a Skypename, phone number, or speed dial code. :return: A call object. :rtype: `call.Call`
['Places', 'a', 'call', 'to', 'a', 'single', 'user', 'or', 'creates', 'a', 'conference', 'call', '.']
train
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L680-L701
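Typical usage, assuming a running Skype client to attach to (Skype4Py's `echo123` test account is a safe single target):

```python
import Skype4Py

skype = Skype4Py.Skype()
skype.Attach()                      # connect to the running Skype client
call = skype.PlaceCall('echo123')   # pass several targets for a conference call
print(call.Status)
```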
1,295
marcomusy/vtkplotter
vtkplotter/actors.py
Prop.scale
def scale(self, s=None): """Set/get actor's scaling factor. :param s: scaling factor(s). :type s: float, list .. note:: if `s==(sx,sy,sz)` scale differently in the three coordinates.""" if s is None: return np.array(self.GetScale()) self.SetScale(s) return self
python
def scale(self, s=None): """Set/get actor's scaling factor. :param s: scaling factor(s). :type s: float, list .. note:: if `s==(sx,sy,sz)` scale differently in the three coordinates.""" if s is None: return np.array(self.GetScale()) self.SetScale(s) return self
['def', 'scale', '(', 'self', ',', 's', '=', 'None', ')', ':', 'if', 's', 'is', 'None', ':', 'return', 'np', '.', 'array', '(', 'self', '.', 'GetScale', '(', ')', ')', 'self', '.', 'SetScale', '(', 's', ')', 'return', 'self']
Set/get actor's scaling factor. :param s: scaling factor(s). :type s: float, list .. note:: if `s==(sx,sy,sz)` scale differently in the three coordinates.
['Set', '/', 'get', 'actor', 's', 'scaling', 'factor', '.']
train
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L372-L382
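Usage follows the set/get convention of the other `Prop` helpers; a sketch with one of vtkplotter's stock shapes:

```python
from vtkplotter import Sphere

actor = Sphere()
actor.scale(2)              # uniform: double the size
actor.scale((1, 2, 0.5))    # per-axis: stretch y, flatten z
print(actor.scale())        # -> [1.  2.  0.5]
```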
1,296
binux/pyspider
pyspider/run.py
one
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts): """ One mode not only means all-in-one, it runs everything in one process over tornado.ioloop, for debugging purposes """ ctx.obj['debug'] = False g = ctx.obj g['testing_mode'] = True if scripts: from pyspider.database.local.projectdb import ProjectDB g['projectdb'] = ProjectDB(scripts) if g.get('is_taskdb_default'): g['taskdb'] = connect_database('sqlite+taskdb://') if g.get('is_resultdb_default'): g['resultdb'] = None if enable_phantomjs: phantomjs_config = g.config.get('phantomjs', {}) phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config) if phantomjs_obj: g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port) else: phantomjs_obj = None if enable_puppeteer: puppeteer_config = g.config.get('puppeteer', {}) puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config) if puppeteer_obj: g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port) else: puppeteer_obj = None result_worker_config = g.config.get('result_worker', {}) if g.resultdb is None: result_worker_config.setdefault('result_cls', 'pyspider.result.OneResultWorker') result_worker_obj = ctx.invoke(result_worker, **result_worker_config) processor_config = g.config.get('processor', {}) processor_config.setdefault('enable_stdout_capture', False) processor_obj = ctx.invoke(processor, **processor_config) fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc', False) fetcher_obj = ctx.invoke(fetcher, **fetcher_config) scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc', False) scheduler_config.setdefault('scheduler_cls', 'pyspider.scheduler.OneScheduler') scheduler_obj = ctx.invoke(scheduler, **scheduler_config) scheduler_obj.init_one(ioloop=fetcher_obj.ioloop, fetcher=fetcher_obj, processor=processor_obj, result_worker=result_worker_obj, interactive=interactive) if scripts: for project in g.projectdb.projects: scheduler_obj.trigger_on_start(project) try: scheduler_obj.run() finally: scheduler_obj.quit() if phantomjs_obj: phantomjs_obj.quit() if puppeteer_obj: puppeteer_obj.quit()
python
def one(ctx, interactive, enable_phantomjs, enable_puppeteer, scripts): """ One mode not only means all-in-one, it runs everything in one process over tornado.ioloop, for debugging purposes """ ctx.obj['debug'] = False g = ctx.obj g['testing_mode'] = True if scripts: from pyspider.database.local.projectdb import ProjectDB g['projectdb'] = ProjectDB(scripts) if g.get('is_taskdb_default'): g['taskdb'] = connect_database('sqlite+taskdb://') if g.get('is_resultdb_default'): g['resultdb'] = None if enable_phantomjs: phantomjs_config = g.config.get('phantomjs', {}) phantomjs_obj = ctx.invoke(phantomjs, **phantomjs_config) if phantomjs_obj: g.setdefault('phantomjs_proxy', '127.0.0.1:%s' % phantomjs_obj.port) else: phantomjs_obj = None if enable_puppeteer: puppeteer_config = g.config.get('puppeteer', {}) puppeteer_obj = ctx.invoke(puppeteer, **puppeteer_config) if puppeteer_obj: g.setdefault('puppeteer_proxy', '127.0.0.1:%s' % puppeteer_obj.port) else: puppeteer_obj = None result_worker_config = g.config.get('result_worker', {}) if g.resultdb is None: result_worker_config.setdefault('result_cls', 'pyspider.result.OneResultWorker') result_worker_obj = ctx.invoke(result_worker, **result_worker_config) processor_config = g.config.get('processor', {}) processor_config.setdefault('enable_stdout_capture', False) processor_obj = ctx.invoke(processor, **processor_config) fetcher_config = g.config.get('fetcher', {}) fetcher_config.setdefault('xmlrpc', False) fetcher_obj = ctx.invoke(fetcher, **fetcher_config) scheduler_config = g.config.get('scheduler', {}) scheduler_config.setdefault('xmlrpc', False) scheduler_config.setdefault('scheduler_cls', 'pyspider.scheduler.OneScheduler') scheduler_obj = ctx.invoke(scheduler, **scheduler_config) scheduler_obj.init_one(ioloop=fetcher_obj.ioloop, fetcher=fetcher_obj, processor=processor_obj, result_worker=result_worker_obj, interactive=interactive) if scripts: for project in g.projectdb.projects: scheduler_obj.trigger_on_start(project) try: scheduler_obj.run() finally: scheduler_obj.quit() if phantomjs_obj: phantomjs_obj.quit() if puppeteer_obj: puppeteer_obj.quit()
['def', 'one', '(', 'ctx', ',', 'interactive', ',', 'enable_phantomjs', ',', 'enable_puppeteer', ',', 'scripts', ')', ':', 'ctx', '.', 'obj', '[', "'debug'", ']', '=', 'False', 'g', '=', 'ctx', '.', 'obj', 'g', '[', "'testing_mode'", ']', '=', 'True', 'if', 'scripts', ':', 'from', 'pyspider', '.', 'database', '.', 'local', '.', 'projectdb', 'import', 'ProjectDB', 'g', '[', "'projectdb'", ']', '=', 'ProjectDB', '(', 'scripts', ')', 'if', 'g', '.', 'get', '(', "'is_taskdb_default'", ')', ':', 'g', '[', "'taskdb'", ']', '=', 'connect_database', '(', "'sqlite+taskdb://'", ')', 'if', 'g', '.', 'get', '(', "'is_resultdb_default'", ')', ':', 'g', '[', "'resultdb'", ']', '=', 'None', 'if', 'enable_phantomjs', ':', 'phantomjs_config', '=', 'g', '.', 'config', '.', 'get', '(', "'phantomjs'", ',', '{', '}', ')', 'phantomjs_obj', '=', 'ctx', '.', 'invoke', '(', 'phantomjs', ',', '*', '*', 'phantomjs_config', ')', 'if', 'phantomjs_obj', ':', 'g', '.', 'setdefault', '(', "'phantomjs_proxy'", ',', "'127.0.0.1:%s'", '%', 'phantomjs_obj', '.', 'port', ')', 'else', ':', 'phantomjs_obj', '=', 'None', 'if', 'enable_puppeteer', ':', 'puppeteer_config', '=', 'g', '.', 'config', '.', 'get', '(', "'puppeteer'", ',', '{', '}', ')', 'puppeteer_obj', '=', 'ctx', '.', 'invoke', '(', 'puppeteer', ',', '*', '*', 'puppeteer_config', ')', 'if', 'puppeteer_obj', ':', 'g', '.', 'setdefault', '(', "'puppeteer_proxy'", ',', "'127.0.0.1:%s'", '%', 'puppeteer_obj', '.', 'port', ')', 'else', ':', 'puppeteer_obj', '=', 'None', 'result_worker_config', '=', 'g', '.', 'config', '.', 'get', '(', "'result_worker'", ',', '{', '}', ')', 'if', 'g', '.', 'resultdb', 'is', 'None', ':', 'result_worker_config', '.', 'setdefault', '(', "'result_cls'", ',', "'pyspider.result.OneResultWorker'", ')', 'result_worker_obj', '=', 'ctx', '.', 'invoke', '(', 'result_worker', ',', '*', '*', 'result_worker_config', ')', 'processor_config', '=', 'g', '.', 'config', '.', 'get', '(', "'processor'", ',', '{', '}', ')', 'processor_config', '.', 'setdefault', '(', "'enable_stdout_capture'", ',', 'False', ')', 'processor_obj', '=', 'ctx', '.', 'invoke', '(', 'processor', ',', '*', '*', 'processor_config', ')', 'fetcher_config', '=', 'g', '.', 'config', '.', 'get', '(', "'fetcher'", ',', '{', '}', ')', 'fetcher_config', '.', 'setdefault', '(', "'xmlrpc'", ',', 'False', ')', 'fetcher_obj', '=', 'ctx', '.', 'invoke', '(', 'fetcher', ',', '*', '*', 'fetcher_config', ')', 'scheduler_config', '=', 'g', '.', 'config', '.', 'get', '(', "'scheduler'", ',', '{', '}', ')', 'scheduler_config', '.', 'setdefault', '(', "'xmlrpc'", ',', 'False', ')', 'scheduler_config', '.', 'setdefault', '(', "'scheduler_cls'", ',', "'pyspider.scheduler.OneScheduler'", ')', 'scheduler_obj', '=', 'ctx', '.', 'invoke', '(', 'scheduler', ',', '*', '*', 'scheduler_config', ')', 'scheduler_obj', '.', 'init_one', '(', 'ioloop', '=', 'fetcher_obj', '.', 'ioloop', ',', 'fetcher', '=', 'fetcher_obj', ',', 'processor', '=', 'processor_obj', ',', 'result_worker', '=', 'result_worker_obj', ',', 'interactive', '=', 'interactive', ')', 'if', 'scripts', ':', 'for', 'project', 'in', 'g', '.', 'projectdb', '.', 'projects', ':', 'scheduler_obj', '.', 'trigger_on_start', '(', 'project', ')', 'try', ':', 'scheduler_obj', '.', 'run', '(', ')', 'finally', ':', 'scheduler_obj', '.', 'quit', '(', ')', 'if', 'phantomjs_obj', ':', 'phantomjs_obj', '.', 'quit', '(', ')', 'if', 'puppeteer_obj', ':', 'puppeteer_obj', '.', 'quit', '(', ')']
One mode not only means all-in-one: it runs everything in one process over tornado.ioloop, for debugging purposes
['One', 'mode', 'not', 'only', 'means', 'all', '-', 'in', '-', 'one', ':', 'it', 'runs', 'everything', 'in', 'one', 'process', 'over', 'tornado', '.', 'ioloop', 'for', 'debugging', 'purposes']
train
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/run.py#L723-L793
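A minimal, self-contained sketch of the click wiring pattern used above: a group seeds shared state on ctx.obj, and an all-in-one command reuses a sibling command by forcing single-process defaults on its config dict and delegating through ctx.invoke. The command names, options, and defaults below are illustrative stand-ins, not pyspider's actual CLI.

import click

@click.group()
@click.pass_context
def cli(ctx):
    ctx.ensure_object(dict)
    # hypothetical per-component defaults, standing in for g.config
    ctx.obj['config'] = {'fetcher': {'poolsize': 10}}

@cli.command()
@click.option('--xmlrpc/--no-xmlrpc', default=True)
@click.option('--poolsize', default=100)
@click.pass_context
def fetcher(ctx, xmlrpc, poolsize):
    # a real CLI would build and return a component object here
    click.echo('fetcher: xmlrpc=%s poolsize=%s' % (xmlrpc, poolsize))
    return {'xmlrpc': xmlrpc, 'poolsize': poolsize}

@cli.command()
@click.pass_context
def one(ctx):
    # read the component config, force a single-process-friendly
    # default, then delegate; ctx.invoke returns the callback's result
    fetcher_config = ctx.obj['config'].get('fetcher', {})
    fetcher_config.setdefault('xmlrpc', False)
    fetcher_obj = ctx.invoke(fetcher, **fetcher_config)
    click.echo('wired: %r' % (fetcher_obj,))

if __name__ == '__main__':
    cli()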
1,297
inasafe/inasafe
safe/report/expressions/html_report.py
place_analysis_summary_report
def place_analysis_summary_report(feature, parent): """Retrieve an HTML place analysis table report from a multi exposure analysis. """ _ = feature, parent # NOQA analysis_dir = get_analysis_dir(exposure_place['key']) if analysis_dir: return get_impact_report_as_string(analysis_dir) return None
python
def place_analysis_summary_report(feature, parent): """Retrieve an HTML place analysis table report from a multi exposure analysis. """ _ = feature, parent # NOQA analysis_dir = get_analysis_dir(exposure_place['key']) if analysis_dir: return get_impact_report_as_string(analysis_dir) return None
['def', 'place_analysis_summary_report', '(', 'feature', ',', 'parent', ')', ':', '_', '=', 'feature', ',', 'parent', '# NOQA', 'analysis_dir', '=', 'get_analysis_dir', '(', 'exposure_place', '[', "'key'", ']', ')', 'if', 'analysis_dir', ':', 'return', 'get_impact_report_as_string', '(', 'analysis_dir', ')', 'return', 'None']
Retrieve an HTML place analysis table report from a multi exposure analysis.
['Retrieve', 'an', 'HTML', 'place', 'analysis', 'table', 'report', 'from', 'a', 'multi', 'exposure', 'analysis', '.']
train
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/expressions/html_report.py#L618-L626
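For context, a minimal sketch of the QGIS expression-function shape this helper follows: a qgsfunction-decorated callable accepts (feature, parent) even when it ignores both, and returns an HTML string (or None) for use inside QGIS expressions. The function name and return value below are invented; only the decorator and signature mirror the real pattern.

from qgis.core import qgsfunction

@qgsfunction(args=0, group='Custom')
def hello_summary_report(feature, parent):
    _ = feature, parent  # NOQA: signature required by QGIS, values unused
    # a real implementation would locate an analysis directory and
    # return its rendered impact report, or None when it is absent
    return '<table><tr><td>placeholder summary</td></tr></table>'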
1,298
koenedaele/pyramid_skosprovider
pyramid_skosprovider/renderers.py
_map_relations
def _map_relations(relations, p, language='any'): ''' :param: :class:`list` relations: Relations to be mapped. These are concept or collection id's. :param: :class:`skosprovider.providers.VocabularyProvider` p: Provider to look up id's. :param string language: Language to render the relations' labels in :rtype: :class:`list` ''' ret = [] for r in relations: c = p.get_by_id(r) if c: ret.append(_map_relation(c, language)) else: log.warning( 'A relation references a concept or collection %s in provider %s that cannot be found. Please check the integrity of your data.' % (r, p.get_vocabulary_id()) ) return ret
python
def _map_relations(relations, p, language='any'): ''' :param: :class:`list` relations: Relations to be mapped. These are concept or collection id's. :param: :class:`skosprovider.providers.VocabularyProvider` p: Provider to look up id's. :param string language: Language to render the relations' labels in :rtype: :class:`list` ''' ret = [] for r in relations: c = p.get_by_id(r) if c: ret.append(_map_relation(c, language)) else: log.warning( 'A relation references a concept or collection %s in provider %s that cannot be found. Please check the integrity of your data.' % (r, p.get_vocabulary_id()) ) return ret
['def', '_map_relations', '(', 'relations', ',', 'p', ',', 'language', '=', "'any'", ')', ':', 'ret', '=', '[', ']', 'for', 'r', 'in', 'relations', ':', 'c', '=', 'p', '.', 'get_by_id', '(', 'r', ')', 'if', 'c', ':', 'ret', '.', 'append', '(', '_map_relation', '(', 'c', ',', 'language', ')', ')', 'else', ':', 'log', '.', 'warning', '(', "'A relation references a concept or collection %s in provider %s that cannot be found. Please check the integrity of your data.'", '%', '(', 'r', ',', 'p', '.', 'get_vocabulary_id', '(', ')', ')', ')', 'return', 'ret']
:param: :class:`list` relations: Relations to be mapped. These are concept or collection id's. :param: :class:`skosprovider.providers.VocabularyProvider` p: Provider to look up id's. :param string language: Language to render the relations' labels in :rtype: :class:`list`
[':', 'param', ':', ':', 'class', ':', 'list', 'relations', ':', 'Relations', 'to', 'be', 'mapped', '.', 'These', 'are', 'concept', 'or', 'collection', 'id', 's', '.', ':', 'param', ':', ':', 'class', ':', 'skosprovider', '.', 'providers', '.', 'VocabularyProvider', 'p', ':', 'Provider', 'to', 'look', 'up', 'id', 's', '.', ':', 'param', 'string', 'language', ':', 'Language', 'to', 'render', 'the', 'relations', 'labels', 'in', ':', 'rtype', ':', ':', 'class', ':', 'list']
train
https://github.com/koenedaele/pyramid_skosprovider/blob/3affdb53cac7ad01bf3656ecd4c4d7ad9b4948b6/pyramid_skosprovider/renderers.py#L81-L100
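A runnable toy version of the same map-and-warn pattern, using a stub provider in place of skosprovider's VocabularyProvider (the class, ids, and labels below are invented for illustration):

import logging

logging.basicConfig(level=logging.WARNING)
log = logging.getLogger(__name__)

class StubProvider(object):
    # stand-in for skosprovider's VocabularyProvider interface
    def __init__(self, vocab_id, items):
        self._id = vocab_id
        self._items = items

    def get_vocabulary_id(self):
        return self._id

    def get_by_id(self, id_):
        return self._items.get(id_)

def map_relations(relations, p):
    ret = []
    for r in relations:
        c = p.get_by_id(r)
        if c:
            ret.append({'id': r, 'label': c})
        else:
            # ids may be strings, so format with %s rather than %d
            log.warning(
                'A relation references %s in provider %s that cannot '
                'be found.', r, p.get_vocabulary_id()
            )
    return ret

p = StubProvider('TREES', {1: 'The Larch', 2: 'The Chestnut'})
print(map_relations([1, 2, 3], p))  # warns about 3; maps 1 and 2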
1,299
gboeing/osmnx
osmnx/core.py
truncate_graph_bbox
def truncate_graph_bbox(G, north, south, east, west, truncate_by_edge=False, retain_all=False): """ Remove every node in graph that falls outside a bounding box. Needed because overpass returns entire ways that also include nodes outside the bbox if the way (that is, a way with a single OSM ID) has a node inside the bbox at some point. Parameters ---------- G : networkx multidigraph north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box truncate_by_edge : bool if True, retain node if it's outside bbox but at least one of node's neighbors is within bbox retain_all : bool if True, return the entire graph even if it is not connected Returns ------- networkx multidigraph """ start_time = time.time() G = G.copy() nodes_outside_bbox = [] for node, data in G.nodes(data=True): if data['y'] > north or data['y'] < south or data['x'] > east or data['x'] < west: # this node is outside the bounding box if not truncate_by_edge: # if we're not truncating by edge, add node to list of nodes # outside the bounding box nodes_outside_bbox.append(node) else: # if we're truncating by edge, see if any of node's neighbors # are within bounding box any_neighbors_in_bbox = False neighbors = list(G.successors(node)) + list(G.predecessors(node)) for neighbor in neighbors: x = G.nodes[neighbor]['x'] y = G.nodes[neighbor]['y'] if y < north and y > south and x < east and x > west: any_neighbors_in_bbox = True break # if none of its neighbors are within the bounding box, add node # to list of nodes outside the bounding box if not any_neighbors_in_bbox: nodes_outside_bbox.append(node) G.remove_nodes_from(nodes_outside_bbox) log('Truncated graph by bounding box in {:,.2f} seconds'.format(time.time()-start_time)) # remove any isolated nodes and retain only the largest component (if # retain_all is False) if not retain_all: G = remove_isolated_nodes(G) G = get_largest_component(G) return G
python
def truncate_graph_bbox(G, north, south, east, west, truncate_by_edge=False, retain_all=False): """ Remove every node in graph that falls outside a bounding box. Needed because overpass returns entire ways that also include nodes outside the bbox if the way (that is, a way with a single OSM ID) has a node inside the bbox at some point. Parameters ---------- G : networkx multidigraph north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box truncate_by_edge : bool if True, retain node if it's outside bbox but at least one of node's neighbors is within bbox retain_all : bool if True, return the entire graph even if it is not connected Returns ------- networkx multidigraph """ start_time = time.time() G = G.copy() nodes_outside_bbox = [] for node, data in G.nodes(data=True): if data['y'] > north or data['y'] < south or data['x'] > east or data['x'] < west: # this node is outside the bounding box if not truncate_by_edge: # if we're not truncating by edge, add node to list of nodes # outside the bounding box nodes_outside_bbox.append(node) else: # if we're truncating by edge, see if any of node's neighbors # are within bounding box any_neighbors_in_bbox = False neighbors = list(G.successors(node)) + list(G.predecessors(node)) for neighbor in neighbors: x = G.nodes[neighbor]['x'] y = G.nodes[neighbor]['y'] if y < north and y > south and x < east and x > west: any_neighbors_in_bbox = True break # if none of its neighbors are within the bounding box, add node # to list of nodes outside the bounding box if not any_neighbors_in_bbox: nodes_outside_bbox.append(node) G.remove_nodes_from(nodes_outside_bbox) log('Truncated graph by bounding box in {:,.2f} seconds'.format(time.time()-start_time)) # remove any isolated nodes and retain only the largest component (if # retain_all is False) if not retain_all: G = remove_isolated_nodes(G) G = get_largest_component(G) return G
['def', 'truncate_graph_bbox', '(', 'G', ',', 'north', ',', 'south', ',', 'east', ',', 'west', ',', 'truncate_by_edge', '=', 'False', ',', 'retain_all', '=', 'False', ')', ':', 'start_time', '=', 'time', '.', 'time', '(', ')', 'G', '=', 'G', '.', 'copy', '(', ')', 'nodes_outside_bbox', '=', '[', ']', 'for', 'node', ',', 'data', 'in', 'G', '.', 'nodes', '(', 'data', '=', 'True', ')', ':', 'if', 'data', '[', "'y'", ']', '>', 'north', 'or', 'data', '[', "'y'", ']', '<', 'south', 'or', 'data', '[', "'x'", ']', '>', 'east', 'or', 'data', '[', "'x'", ']', '<', 'west', ':', '# this node is outside the bounding box', 'if', 'not', 'truncate_by_edge', ':', "# if we're not truncating by edge, add node to list of nodes", '# outside the bounding box', 'nodes_outside_bbox', '.', 'append', '(', 'node', ')', 'else', ':', "# if we're truncating by edge, see if any of node's neighbors", '# are within bounding box', 'any_neighbors_in_bbox', '=', 'False', 'neighbors', '=', 'list', '(', 'G', '.', 'successors', '(', 'node', ')', ')', '+', 'list', '(', 'G', '.', 'predecessors', '(', 'node', ')', ')', 'for', 'neighbor', 'in', 'neighbors', ':', 'x', '=', 'G', '.', 'nodes', '[', 'neighbor', ']', '[', "'x'", ']', 'y', '=', 'G', '.', 'nodes', '[', 'neighbor', ']', '[', "'y'", ']', 'if', 'y', '<', 'north', 'and', 'y', '>', 'south', 'and', 'x', '<', 'east', 'and', 'x', '>', 'west', ':', 'any_neighbors_in_bbox', '=', 'True', 'break', '# if none of its neighbors are within the bounding box, add node', '# to list of nodes outside the bounding box', 'if', 'not', 'any_neighbors_in_bbox', ':', 'nodes_outside_bbox', '.', 'append', '(', 'node', ')', 'G', '.', 'remove_nodes_from', '(', 'nodes_outside_bbox', ')', 'log', '(', "'Truncated graph by bounding box in {:,.2f} seconds'", '.', 'format', '(', 'time', '.', 'time', '(', ')', '-', 'start_time', ')', ')', '# remove any isolated nodes and retain only the largest component (if', '# retain_all is False)', 'if', 'not', 'retain_all', ':', 'G', '=', 'remove_isolated_nodes', '(', 'G', ')', 'G', '=', 'get_largest_component', '(', 'G', ')', 'return', 'G']
Remove every node in graph that falls outside a bounding box. Needed because overpass returns entire ways that also include nodes outside the bbox if the way (that is, a way with a single OSM ID) has a node inside the bbox at some point. Parameters ---------- G : networkx multidigraph north : float northern latitude of bounding box south : float southern latitude of bounding box east : float eastern longitude of bounding box west : float western longitude of bounding box truncate_by_edge : bool if True, retain node if it's outside bbox but at least one of node's neighbors is within bbox retain_all : bool if True, return the entire graph even if it is not connected Returns ------- networkx multidigraph
['Remove', 'every', 'node', 'in', 'graph', 'that', 'falls', 'outside', 'a', 'bounding', 'box', '.']
train
https://github.com/gboeing/osmnx/blob/be59fd313bcb68af8fc79242c56194f1247e26e2/osmnx/core.py#L940-L1007
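A runnable toy illustration of the truncation logic above, on a hand-built networkx MultiDiGraph instead of a real OSM download (node ids and coordinates are invented):

import networkx as nx

G = nx.MultiDiGraph()
G.add_node(1, x=0.5, y=0.5)  # inside the bbox
G.add_node(2, x=1.5, y=0.5)  # outside, but adjacent to node 1
G.add_node(3, x=2.5, y=0.5)  # outside, no neighbor inside
G.add_edge(1, 2)
G.add_edge(2, 3)

north, south, east, west = 1.0, 0.0, 1.0, 0.0

def outside(d):
    return d['y'] > north or d['y'] < south or d['x'] > east or d['x'] < west

# plain truncation drops nodes 2 and 3; with truncate_by_edge=True the
# function above would keep node 2 because its neighbor (node 1) lies
# inside the bbox, while node 3 is still removed
H = G.copy()
H.remove_nodes_from([n for n, d in H.nodes(data=True) if outside(d)])
print(sorted(H.nodes))  # -> [1]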