column                     type           range
-------------------------  -------------  --------------
Unnamed: 0                 int64          0 to 10k
repository_name            stringlengths  7 to 54
func_path_in_repository    stringlengths  5 to 223
func_name                  stringlengths  1 to 134
whole_func_string          stringlengths  100 to 30.3k
language                   stringclasses  1 value
func_code_string           stringlengths  100 to 30.3k
func_code_tokens           stringlengths  138 to 33.2k
func_documentation_string  stringlengths  1 to 15k
func_documentation_tokens  stringlengths  5 to 5.14k
split_name                 stringclasses  1 value
func_code_url              stringlengths  91 to 315
700
troeger/opensubmit
web/opensubmit/admin/course.py
CourseAdmin.get_queryset
def get_queryset(self, request):
    ''' Restrict the listed courses for the current user.'''
    qs = super(CourseAdmin, self).get_queryset(request)
    if request.user.is_superuser:
        return qs
    else:
        return qs.filter(Q(tutors__pk=request.user.pk) | Q(owner=request.user)).distinct()
python
['def', 'get_queryset', '(', 'self', ',', 'request', ')', ':', 'qs', '=', 'super', '(', 'CourseAdmin', ',', 'self', ')', '.', 'get_queryset', '(', 'request', ')', 'if', 'request', '.', 'user', '.', 'is_superuser', ':', 'return', 'qs', 'else', ':', 'return', 'qs', '.', 'filter', '(', 'Q', '(', 'tutors__pk', '=', 'request', '.', 'user', '.', 'pk', ')', '|', 'Q', '(', 'owner', '=', 'request', '.', 'user', ')', ')', '.', 'distinct', '(', ')']
Restrict the listed courses for the current user.
['Restrict', 'the', 'listed', 'courses', 'for', 'the', 'current', 'user', '.']
train
https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/admin/course.py#L25-L31
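The Q-object OR filter plus distinct() is a standard Django ORM pattern; a minimal sketch of the same idea outside the admin (the Course model import is assumed from the surrounding app):

from django.db.models import Q

def courses_visible_to(user):
    # Superusers see everything; everyone else sees courses they own or tutor.
    if user.is_superuser:
        return Course.objects.all()
    # distinct() matters: the tutors join can otherwise yield duplicate rows.
    return Course.objects.filter(Q(tutors__pk=user.pk) | Q(owner=user)).distinct()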
701
allenai/allennlp
allennlp/common/params.py
Params.get
def get(self, key: str, default: Any = DEFAULT):
    """
    Performs the functionality associated with dict.get(key) but also checks for returned
    dicts and returns a Params object in their place with an updated history.
    """
    if default is self.DEFAULT:
        try:
            value = self.params.get(key)
        except KeyError:
            raise ConfigurationError("key \"{}\" is required at location \"{}\"".format(key, self.history))
    else:
        value = self.params.get(key, default)
    return self._check_is_dict(key, value)
python
['def', 'get', '(', 'self', ',', 'key', ':', 'str', ',', 'default', ':', 'Any', '=', 'DEFAULT', ')', ':', 'if', 'default', 'is', 'self', '.', 'DEFAULT', ':', 'try', ':', 'value', '=', 'self', '.', 'params', '.', 'get', '(', 'key', ')', 'except', 'KeyError', ':', 'raise', 'ConfigurationError', '(', '"key \\"{}\\" is required at location \\"{}\\""', '.', 'format', '(', 'key', ',', 'self', '.', 'history', ')', ')', 'else', ':', 'value', '=', 'self', '.', 'params', '.', 'get', '(', 'key', ',', 'default', ')', 'return', 'self', '.', '_check_is_dict', '(', 'key', ',', 'value', ')']
Performs the functionality associated with dict.get(key) but also checks for returned dicts and returns a Params object in their place with an updated history.
['Performs', 'the', 'functionality', 'associated', 'with', 'dict', '.', 'get', '(', 'key', ')', 'but', 'also', 'checks', 'for', 'returned', 'dicts', 'and', 'returns', 'a', 'Params', 'object', 'in', 'their', 'place', 'with', 'an', 'updated', 'history', '.']
train
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L291-L303
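A usage sketch with hypothetical values. One quirk in the body as quoted: dict.get never raises KeyError, so with a plain dict the ConfigurationError branch is unreachable and a missing required key quietly comes back as None.

params = Params({"model": {"type": "lstm", "hidden_size": 256}})
model_params = params.get("model")             # nested dict comes back wrapped as Params
hidden = model_params.get("hidden_size", 128)  # plain value, with a default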
702
deepmind/pysc2
pysc2/lib/run_parallel.py
RunParallel.run
def run(self, funcs):
  """Run a set of functions in parallel, returning their results.

  Make sure any function you pass exits with a reasonable timeout. If it
  doesn't return within the timeout or the result is ignored due to an
  exception in a separate thread it will continue to stick around until it
  finishes, including blocking process exit.

  Args:
    funcs: An iterable of functions or iterable of args to functools.partial.

  Returns:
    A list of return values with the values matching the order in funcs.

  Raises:
    Propagates the first exception encountered in one of the functions.
  """
  funcs = [f if callable(f) else functools.partial(*f) for f in funcs]
  if len(funcs) == 1:  # Ignore threads if it's not needed.
    return [funcs[0]()]
  if len(funcs) > self._workers:  # Lazy init and grow as needed.
    self.shutdown()
    self._workers = len(funcs)
    self._executor = futures.ThreadPoolExecutor(self._workers)
  futs = [self._executor.submit(f) for f in funcs]
  done, not_done = futures.wait(futs, self._timeout, futures.FIRST_EXCEPTION)
  # Make sure to propagate any exceptions.
  for f in done:
    if not f.cancelled() and f.exception() is not None:
      if not_done:
        # If there are some calls that haven't finished, cancel and recreate
        # the thread pool. Otherwise we may have a thread running forever
        # blocking parallel calls.
        for nd in not_done:
          nd.cancel()
        self.shutdown(False)  # Don't wait, they may be deadlocked.
      raise f.exception()
  # Either done or timed out, so don't wait again.
  return [f.result(timeout=0) for f in futs]
python
['def', 'run', '(', 'self', ',', 'funcs', ')', ':', 'funcs', '=', '[', 'f', 'if', 'callable', '(', 'f', ')', 'else', 'functools', '.', 'partial', '(', '*', 'f', ')', 'for', 'f', 'in', 'funcs', ']', 'if', 'len', '(', 'funcs', ')', '==', '1', ':', "# Ignore threads if it's not needed.", 'return', '[', 'funcs', '[', '0', ']', '(', ')', ']', 'if', 'len', '(', 'funcs', ')', '>', 'self', '.', '_workers', ':', '# Lazy init and grow as needed.', 'self', '.', 'shutdown', '(', ')', 'self', '.', '_workers', '=', 'len', '(', 'funcs', ')', 'self', '.', '_executor', '=', 'futures', '.', 'ThreadPoolExecutor', '(', 'self', '.', '_workers', ')', 'futs', '=', '[', 'self', '.', '_executor', '.', 'submit', '(', 'f', ')', 'for', 'f', 'in', 'funcs', ']', 'done', ',', 'not_done', '=', 'futures', '.', 'wait', '(', 'futs', ',', 'self', '.', '_timeout', ',', 'futures', '.', 'FIRST_EXCEPTION', ')', '# Make sure to propagate any exceptions.', 'for', 'f', 'in', 'done', ':', 'if', 'not', 'f', '.', 'cancelled', '(', ')', 'and', 'f', '.', 'exception', '(', ')', 'is', 'not', 'None', ':', 'if', 'not_done', ':', "# If there are some calls that haven't finished, cancel and recreate", '# the thread pool. Otherwise we may have a thread running forever', '# blocking parallel calls.', 'for', 'nd', 'in', 'not_done', ':', 'nd', '.', 'cancel', '(', ')', 'self', '.', 'shutdown', '(', 'False', ')', "# Don't wait, they may be deadlocked.", 'raise', 'f', '.', 'exception', '(', ')', "# Either done or timed out, so don't wait again.", 'return', '[', 'f', '.', 'result', '(', 'timeout', '=', '0', ')', 'for', 'f', 'in', 'futs', ']']
Run a set of functions in parallel, returning their results. Make sure any function you pass exits with a reasonable timeout. If it doesn't return within the timeout or the result is ignored due to an exception in a separate thread it will continue to stick around until it finishes, including blocking process exit. Args: funcs: An iterable of functions or iterable of args to functools.partial. Returns: A list of return values with the values matching the order in funcs. Raises: Propagates the first exception encountered in one of the functions.
['Run', 'a', 'set', 'of', 'functions', 'in', 'parallel', 'returning', 'their', 'results', '.']
train
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/run_parallel.py#L37-L75
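A sketch of the two accepted call forms per the docstring (the zero-argument constructor is an assumption):

parallel = RunParallel()
results = parallel.run([
    lambda: 1 + 1,   # a plain callable
    (max, 3, 7),     # non-callable: becomes functools.partial(max, 3, 7)
])
# results == [2, 7], matching the order of the inputs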
703
PaloAltoNetworks/pancloud
pancloud/credentials.py
Credentials._credentials_found_in_envars
def _credentials_found_in_envars():
    """Check for credentials in envars.

    Returns:
        bool: ``True`` if at least one is found, otherwise ``False``.

    """
    return any([os.getenv('PAN_ACCESS_TOKEN'),
                os.getenv('PAN_CLIENT_ID'),
                os.getenv('PAN_CLIENT_SECRET'),
                os.getenv('PAN_REFRESH_TOKEN')])
python
['def', '_credentials_found_in_envars', '(', ')', ':', 'return', 'any', '(', '[', 'os', '.', 'getenv', '(', "'PAN_ACCESS_TOKEN'", ')', ',', 'os', '.', 'getenv', '(', "'PAN_CLIENT_ID'", ')', ',', 'os', '.', 'getenv', '(', "'PAN_CLIENT_SECRET'", ')', ',', 'os', '.', 'getenv', '(', "'PAN_REFRESH_TOKEN'", ')', ']', ')']
Check for credentials in envars. Returns: bool: ``True`` if at least one is found, otherwise ``False``.
['Check', 'for', 'credentials', 'in', 'envars', '.']
train
https://github.com/PaloAltoNetworks/pancloud/blob/c51e4c8aca3c988c60f062291007534edcb55285/pancloud/credentials.py#L183-L193
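A quick check of the behaviour, assuming the helper is exposed as the static method its qualified name suggests; any one of the four variables is enough:

import os
os.environ['PAN_REFRESH_TOKEN'] = 'dummy'
assert Credentials._credentials_found_in_envars()  # True: at least one var is set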
704
tomer8007/kik-bot-api-unofficial
kik_unofficial/client.py
KikClient.check_username_uniqueness
def check_username_uniqueness(self, username):
    """
    Checks if the given username is available for registration.
    Results are returned in the on_username_uniqueness_received() callback

    :param username: The username to check for its existence
    """
    log.info("[+] Checking for Uniqueness of username '{}'".format(username))
    return self._send_xmpp_element(sign_up.CheckUsernameUniquenessRequest(username))
python
['def', 'check_username_uniqueness', '(', 'self', ',', 'username', ')', ':', 'log', '.', 'info', '(', '"[+] Checking for Uniqueness of username \'{}\'"', '.', 'format', '(', 'username', ')', ')', 'return', 'self', '.', '_send_xmpp_element', '(', 'sign_up', '.', 'CheckUsernameUniquenessRequest', '(', 'username', ')', ')']
Checks if the given username is available for registration. Results are returned in the on_username_uniqueness_received() callback :param username: The username to check for its existence
['Checks', 'if', 'the', 'given', 'username', 'is', 'available', 'for', 'registration', '.', 'Results', 'are', 'returned', 'in', 'the', 'on_username_uniqueness_received', '()', 'callback']
train
https://github.com/tomer8007/kik-bot-api-unofficial/blob/2ae5216bc05e7099a41895382fc8e428a7a5c3ac/kik_unofficial/client.py#L359-L367
705
ellmetha/django-machina
machina/apps/forum_conversation/forum_polls/views.py
TopicPollVoteView.get_form_kwargs
def get_form_kwargs(self):
    """ Returns the keyword arguments to provide to the associated form. """
    kwargs = super(ModelFormMixin, self).get_form_kwargs()
    kwargs['poll'] = self.object
    return kwargs
python
['def', 'get_form_kwargs', '(', 'self', ')', ':', 'kwargs', '=', 'super', '(', 'ModelFormMixin', ',', 'self', ')', '.', 'get_form_kwargs', '(', ')', 'kwargs', '[', "'poll'", ']', '=', 'self', '.', 'object', 'return', 'kwargs']
Returns the keyword arguments to provide to the associated form.
['Returns', 'the', 'keyword', 'arguments', 'to', 'provide', 'to', 'the', 'associated', 'form', '.']
train
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_conversation/forum_polls/views.py#L37-L41
706
bitesofcode/projex
projex/init.py
importobject
def importobject(module_name, object_name):
    """
    Imports the object with the given name from the inputted module.

    :param      module_name | <str>
                object_name | <str>

    :usage      |>>> import projex
                |>>> modname = 'projex.envmanager'
                |>>> attr = 'EnvManager'
                |>>> EnvManager = projex.importobject(modname, attr)

    :return     <object> || None
    """
    if module_name not in sys.modules:
        try:
            __import__(module_name)
        except ImportError:
            logger.debug(traceback.print_exc())
            logger.error('Could not import module: %s', module_name)
            return None

    module = sys.modules.get(module_name)
    if not module:
        logger.warning('No module %s found.' % module_name)
        return None

    if not hasattr(module, object_name):
        logger.warning('No object %s in %s.' % (object_name, module_name))
        return None

    return getattr(module, object_name)
python
['def', 'importobject', '(', 'module_name', ',', 'object_name', ')', ':', 'if', 'module_name', 'not', 'in', 'sys', '.', 'modules', ':', 'try', ':', '__import__', '(', 'module_name', ')', 'except', 'ImportError', ':', 'logger', '.', 'debug', '(', 'traceback', '.', 'print_exc', '(', ')', ')', 'logger', '.', 'error', '(', "'Could not import module: %s'", ',', 'module_name', ')', 'return', 'None', 'module', '=', 'sys', '.', 'modules', '.', 'get', '(', 'module_name', ')', 'if', 'not', 'module', ':', 'logger', '.', 'warning', '(', "'No module %s found.'", '%', 'module_name', ')', 'return', 'None', 'if', 'not', 'hasattr', '(', 'module', ',', 'object_name', ')', ':', 'logger', '.', 'warning', '(', "'No object %s in %s.'", '%', '(', 'object_name', ',', 'module_name', ')', ')', 'return', 'None', 'return', 'getattr', '(', 'module', ',', 'object_name', ')']
Imports the object with the given name from the inputted module. :param module_name | <str> object_name | <str> :usage |>>> import projex |>>> modname = 'projex.envmanager' |>>> attr = 'EnvManager' |>>> EnvManager = projex.importobject(modname, attr) :return <object> || None
['Imports', 'the', 'object', 'with', 'the', 'given', 'name', 'from', 'the', 'inputted', 'module', '.', ':', 'param', 'module_name', '|', '<str', '>', 'object_name', '|', '<str', '>', ':', 'usage', '|', '>>>', 'import', 'projex', '|', '>>>', 'modname', '=', 'projex', '.', 'envmanager', '|', '>>>', 'attr', '=', 'EnvManager', '|', '>>>', 'EnvManager', '=', 'projex', '.', 'importobject', '(', 'modname', 'attr', ')', ':', 'return', '<object', '>', '||', 'None']
train
https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/init.py#L257-L288
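A quick usage sketch against the standard library, mirroring the docstring's projex example:

OrderedDict = importobject('collections', 'OrderedDict')
assert OrderedDict is not None
missing = importobject('collections', 'NoSuchName')  # logs a warning and returns None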
707
petebachant/Nortek-Python
nortek/controls.py
PdControl.coordinate_system
def coordinate_system(self, coordsys):
    """Sets instrument coordinate system. Accepts an int or string."""
    if coordsys.upper() == "ENU":
        ncs = 0
    elif coordsys.upper() == "XYZ":
        ncs = 1
    elif coordsys.upper() == "BEAM":
        ncs = 2
    elif coordsys in [0, 1, 2]:
        ncs = coordsys
    else:
        raise ValueError("Invalid coordinate system selection")
    self.pdx.CoordinateSystem = ncs
python
['def', 'coordinate_system', '(', 'self', ',', 'coordsys', ')', ':', 'if', 'coordsys', '.', 'upper', '(', ')', '==', '"ENU"', ':', 'ncs', '=', '0', 'elif', 'coordsys', '.', 'upper', '(', ')', '==', '"XYZ"', ':', 'ncs', '=', '1', 'elif', 'coordsys', '.', 'upper', '(', ')', '==', '"BEAM"', ':', 'ncs', '=', '2', 'elif', 'coordsys', 'in', '[', '0', ',', '1', ',', '2', ']', ':', 'ncs', '=', 'coordsys', 'else', ':', 'raise', 'ValueError', '(', '"Invalid coordinate system selection"', ')', 'self', '.', 'pdx', '.', 'CoordinateSystem', '=', 'ncs']
Sets instrument coordinate system. Accepts an int or string.
['Sets', 'instrument', 'coordinate', 'system', '.', 'Accepts', 'an', 'int', 'or', 'string', '.']
train
https://github.com/petebachant/Nortek-Python/blob/6c979662cf62c11ad5899ccc5e53365c87e5be02/nortek/controls.py#L332-L344
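One caveat: despite the docstring, an int argument never reaches the coordsys in [0, 1, 2] branch, because every earlier branch calls coordsys.upper() first and an int raises AttributeError there. A reordered sketch that honours both input types (a sketch, not the library's code):

def coordinate_system(self, coordsys):
    """Sets instrument coordinate system. Accepts an int or string."""
    mapping = {"ENU": 0, "XYZ": 1, "BEAM": 2}
    if isinstance(coordsys, str) and coordsys.upper() in mapping:
        ncs = mapping[coordsys.upper()]   # named system
    elif coordsys in (0, 1, 2):
        ncs = coordsys                    # already a numeric code
    else:
        raise ValueError("Invalid coordinate system selection")
    self.pdx.CoordinateSystem = ncs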
708
danilobellini/audiolazy
audiolazy/lazy_lpc.py
lpc
def lpc(blk, order=None):
  """
  Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
  the analysis whitening filter. This implementation uses the autocorrelation
  method, using numpy.linalg.pinv as a linear system solver.

  Parameters
  ----------
  blk :
    An iterable with well-defined length. Don't use this function with Stream
    objects!
  order :
    The order of the resulting ZFilter object. Defaults to ``len(blk) - 1``.

  Returns
  -------
  A FIR filter, as a ZFilter object. The mean squared error over the given
  block is in its "error" attribute.

  Hint
  ----
  See ``lpc.kautocor`` example, which should apply equally for this strategy.

  See Also
  --------
  lpc.autocor:
    LPC coefficients by using one of the autocorrelation method strategies.
  lpc.kautocor:
    LPC coefficients obtained with Levinson-Durbin algorithm.

  """
  from numpy import matrix
  from numpy.linalg import pinv
  acdata = acorr(blk, order)
  coeffs = pinv(toeplitz(acdata[:-1])) * -matrix(acdata[1:]).T
  coeffs = coeffs.T.tolist()[0]
  filt = 1 + sum(ai * z ** -i for i, ai in enumerate(coeffs, 1))
  filt.error = acdata[0] + sum(a * c for a, c in xzip(acdata[1:], coeffs))
  return filt
python
['def', 'lpc', '(', 'blk', ',', 'order', '=', 'None', ')', ':', 'from', 'numpy', 'import', 'matrix', 'from', 'numpy', '.', 'linalg', 'import', 'pinv', 'acdata', '=', 'acorr', '(', 'blk', ',', 'order', ')', 'coeffs', '=', 'pinv', '(', 'toeplitz', '(', 'acdata', '[', ':', '-', '1', ']', ')', ')', '*', '-', 'matrix', '(', 'acdata', '[', '1', ':', ']', ')', '.', 'T', 'coeffs', '=', 'coeffs', '.', 'T', '.', 'tolist', '(', ')', '[', '0', ']', 'filt', '=', '1', '+', 'sum', '(', 'ai', '*', 'z', '**', '-', 'i', 'for', 'i', ',', 'ai', 'in', 'enumerate', '(', 'coeffs', ',', '1', ')', ')', 'filt', '.', 'error', '=', 'acdata', '[', '0', ']', '+', 'sum', '(', 'a', '*', 'c', 'for', 'a', ',', 'c', 'in', 'xzip', '(', 'acdata', '[', '1', ':', ']', ',', 'coeffs', ')', ')', 'return', 'filt']
Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object, the analysis whitening filter. This implementation uses the autocorrelation method, using numpy.linalg.pinv as a linear system solver. Parameters ---------- blk : An iterable with well-defined length. Don't use this function with Stream objects! order : The order of the resulting ZFilter object. Defaults to ``len(blk) - 1``. Returns ------- A FIR filter, as a ZFilter object. The mean squared error over the given block is in its "error" attribute. Hint ---- See ``lpc.kautocor`` example, which should apply equally for this strategy. See Also -------- lpc.autocor: LPC coefficients by using one of the autocorrelation method strategies. lpc.kautocor: LPC coefficients obtained with Levinson-Durbin algorithm.
['Find', 'the', 'Linear', 'Predictive', 'Coding', '(', 'LPC', ')', 'coefficients', 'as', 'a', 'ZFilter', 'object', 'the', 'analysis', 'whitening', 'filter', '.', 'This', 'implementation', 'uses', 'the', 'autocorrelation', 'method', 'using', 'numpy', '.', 'linalg', '.', 'pinv', 'as', 'a', 'linear', 'system', 'solver', '.']
train
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_lpc.py#L187-L225
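In matrix form, the pinv(toeplitz(acdata[:-1])) * -matrix(acdata[1:]).T line solves the autocorrelation-method normal equations (Yule-Walker), with R(k) the block autocorrelation at lag k; the error attribute is the matching minimum prediction error:

\[
\begin{bmatrix}
R(0) & R(1) & \cdots & R(p-1)\\
R(1) & R(0) & \cdots & R(p-2)\\
\vdots & \vdots & \ddots & \vdots\\
R(p-1) & R(p-2) & \cdots & R(0)
\end{bmatrix}
\begin{bmatrix} a_1\\ a_2\\ \vdots\\ a_p \end{bmatrix}
=
-\begin{bmatrix} R(1)\\ R(2)\\ \vdots\\ R(p) \end{bmatrix},
\qquad
E = R(0) + \sum_{k=1}^{p} a_k R(k)
\]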
709
facetoe/zenpy
zenpy/lib/api.py
BaseApi.check_ratelimit_budget
def check_ratelimit_budget(self, seconds_waited):
    """ If we have a ratelimit_budget, ensure it is not exceeded. """
    if self.ratelimit_budget is not None:
        self.ratelimit_budget -= seconds_waited
        if self.ratelimit_budget < 1:
            raise RatelimitBudgetExceeded("Rate limit budget exceeded!")
python
['def', 'check_ratelimit_budget', '(', 'self', ',', 'seconds_waited', ')', ':', 'if', 'self', '.', 'ratelimit_budget', 'is', 'not', 'None', ':', 'self', '.', 'ratelimit_budget', '-=', 'seconds_waited', 'if', 'self', '.', 'ratelimit_budget', '<', '1', ':', 'raise', 'RatelimitBudgetExceeded', '(', '"Rate limit budget exceeded!"', ')']
If we have a ratelimit_budget, ensure it is not exceeded.
['If', 'we', 'have', 'a', 'ratelimit_budget', 'ensure', 'it', 'is', 'not', 'exceeded', '.']
train
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L153-L158
710
saltstack/salt
salt/modules/boto_kms.py
generate_data_key
def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
                      key_spec=None, grant_tokens=None, region=None, key=None,
                      keyid=None, profile=None):
    '''
    Generate a secure data key.

    CLI example::

        salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    r = {}
    try:
        data_key = conn.generate_data_key(
            key_id,
            encryption_context=encryption_context,
            number_of_bytes=number_of_bytes,
            key_spec=key_spec,
            grant_tokens=grant_tokens
        )
        r['data_key'] = data_key
    except boto.exception.BotoServerError as e:
        r['error'] = __utils__['boto.get_error'](e)
    return r
python
['def', 'generate_data_key', '(', 'key_id', ',', 'encryption_context', '=', 'None', ',', 'number_of_bytes', '=', 'None', ',', 'key_spec', '=', 'None', ',', 'grant_tokens', '=', 'None', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'r', '=', '{', '}', 'try', ':', 'data_key', '=', 'conn', '.', 'generate_data_key', '(', 'key_id', ',', 'encryption_context', '=', 'encryption_context', ',', 'number_of_bytes', '=', 'number_of_bytes', ',', 'key_spec', '=', 'key_spec', ',', 'grant_tokens', '=', 'grant_tokens', ')', 'r', '[', "'data_key'", ']', '=', 'data_key', 'except', 'boto', '.', 'exception', '.', 'BotoServerError', 'as', 'e', ':', 'r', '[', "'error'", ']', '=', '__utils__', '[', "'boto.get_error'", ']', '(', 'e', ')', 'return', 'r']
Generate a secure data key. CLI example:: salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
['Generate', 'a', 'secure', 'data', 'key', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kms.py#L340-L364
711
xolox/python-verboselogs
verboselogs/__init__.py
VerboseLogger.verbose
def verbose(self, msg, *args, **kw):
    """Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`."""
    if self.isEnabledFor(VERBOSE):
        self._log(VERBOSE, msg, args, **kw)
python
['def', 'verbose', '(', 'self', ',', 'msg', ',', '*', 'args', ',', '*', '*', 'kw', ')', ':', 'if', 'self', '.', 'isEnabledFor', '(', 'VERBOSE', ')', ':', 'self', '.', '_log', '(', 'VERBOSE', ',', 'msg', ',', 'args', ',', '*', '*', 'kw', ')']
Log a message with level :data:`VERBOSE`. The arguments are interpreted as for :func:`logging.debug()`.
['Log', 'a', 'message', 'with', 'level', ':', 'data', ':', 'VERBOSE', '.', 'The', 'arguments', 'are', 'interpreted', 'as', 'for', ':', 'func', ':', 'logging', '.', 'debug', '()', '.']
train
https://github.com/xolox/python-verboselogs/blob/3cebc69e03588bb6c3726c38c324b12732989292/verboselogs/__init__.py#L163-L166
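A minimal wiring sketch; VERBOSE is the extra level this library defines between DEBUG (10) and INFO (20):

import logging
import verboselogs

logger = verboselogs.VerboseLogger('demo')
logger.addHandler(logging.StreamHandler())
logger.setLevel(verboselogs.VERBOSE)
logger.verbose("emitted, since the VERBOSE level is enabled")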
712
aiogram/aiogram
aiogram/dispatcher/webhook.py
WebhookRequestHandler.respond_via_request
def respond_via_request(self, task):
    """
    Handle response after 55 seconds.

    :param task:
    :return:
    """
    warn(f"Detected slow response into webhook. "
         f"(Greater than {RESPONSE_TIMEOUT} seconds)\n"
         f"Recommended to use 'async_task' decorator from Dispatcher for handler with long timeouts.",
         TimeoutWarning)
    dispatcher = self.get_dispatcher()
    loop = dispatcher.loop

    try:
        results = task.result()
    except Exception as e:
        loop.create_task(
            dispatcher.errors_handlers.notify(dispatcher, types.Update.get_current(), e))
    else:
        response = self.get_response(results)
        if response is not None:
            asyncio.ensure_future(response.execute_response(dispatcher.bot), loop=loop)
python
['def', 'respond_via_request', '(', 'self', ',', 'task', ')', ':', 'warn', '(', 'f"Detected slow response into webhook. "', 'f"(Greater than {RESPONSE_TIMEOUT} seconds)\\n"', 'f"Recommended to use \'async_task\' decorator from Dispatcher for handler with long timeouts."', ',', 'TimeoutWarning', ')', 'dispatcher', '=', 'self', '.', 'get_dispatcher', '(', ')', 'loop', '=', 'dispatcher', '.', 'loop', 'try', ':', 'results', '=', 'task', '.', 'result', '(', ')', 'except', 'Exception', 'as', 'e', ':', 'loop', '.', 'create_task', '(', 'dispatcher', '.', 'errors_handlers', '.', 'notify', '(', 'dispatcher', ',', 'types', '.', 'Update', '.', 'get_current', '(', ')', ',', 'e', ')', ')', 'else', ':', 'response', '=', 'self', '.', 'get_response', '(', 'results', ')', 'if', 'response', 'is', 'not', 'None', ':', 'asyncio', '.', 'ensure_future', '(', 'response', '.', 'execute_response', '(', 'dispatcher', '.', 'bot', ')', ',', 'loop', '=', 'loop', ')']
Handle response after 55 seconds. :param task: :return:
['Handle', 'response', 'after', '55', 'seconds', '.']
train
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/dispatcher/webhook.py#L196-L219
713
geertj/gruvi
lib/gruvi/sync.py
Condition.wait_for
def wait_for(self, predicate, timeout=None):
    """Like :meth:`wait` but additionally waits for *predicate* to be true.

    The *predicate* argument must be a callable that takes no arguments.
    Its result is interpreted as a boolean value.
    """
    if not is_locked(self._lock):
        raise RuntimeError('lock is not locked')
    hub = get_hub()
    try:
        with switch_back(timeout, lock=thread_lock(self._lock)) as switcher:
            handle = add_callback(self, switcher, predicate)
            # See the comment in Lock.acquire() why it is OK to release the
            # lock here before calling hub.switch().
            # Also if this is a reentrant lock make sure it is fully released.
            state = release_save(self._lock)
            hub.switch()
    except BaseException as e:
        with self._lock:
            remove_callback(self, handle)
        if e is switcher.timeout:
            return False
        raise
    finally:
        acquire_restore(self._lock, state)
    return True
python
['def', 'wait_for', '(', 'self', ',', 'predicate', ',', 'timeout', '=', 'None', ')', ':', 'if', 'not', 'is_locked', '(', 'self', '.', '_lock', ')', ':', 'raise', 'RuntimeError', '(', "'lock is not locked'", ')', 'hub', '=', 'get_hub', '(', ')', 'try', ':', 'with', 'switch_back', '(', 'timeout', ',', 'lock', '=', 'thread_lock', '(', 'self', '.', '_lock', ')', ')', 'as', 'switcher', ':', 'handle', '=', 'add_callback', '(', 'self', ',', 'switcher', ',', 'predicate', ')', '# See the comment in Lock.acquire() why it is OK to release the', '# lock here before calling hub.switch().', '# Also if this is a reentrant lock make sure it is fully released.', 'state', '=', 'release_save', '(', 'self', '.', '_lock', ')', 'hub', '.', 'switch', '(', ')', 'except', 'BaseException', 'as', 'e', ':', 'with', 'self', '.', '_lock', ':', 'remove_callback', '(', 'self', ',', 'handle', ')', 'if', 'e', 'is', 'switcher', '.', 'timeout', ':', 'return', 'False', 'raise', 'finally', ':', 'acquire_restore', '(', 'self', '.', '_lock', ',', 'state', ')', 'return', 'True']
Like :meth:`wait` but additionally waits for *predicate* to be true. The *predicate* argument must be a callable that takes no arguments. Its result is interpreted as a boolean value.
['Like', ':', 'meth', ':', 'wait', 'but', 'additionally', 'waits', 'for', '*', 'predicate', '*', 'to', 'be', 'true', '.']
train
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/sync.py#L381-L406
714
wbond/asn1crypto
asn1crypto/x509.py
Certificate.authority_issuer_serial
def authority_issuer_serial(self):
    """
    :return:
        None or a byte string of the SHA-256 hash of the issuer from the
        authority key identifier extension concatenated with the ascii
        character ":", concatenated with the serial number from the
        authority key identifier extension as an ascii string
    """
    if self._authority_issuer_serial is False:
        akiv = self.authority_key_identifier_value
        if akiv and akiv['authority_cert_issuer'].native:
            issuer = self.authority_key_identifier_value['authority_cert_issuer'][0].chosen
            # We untag the element since it is tagged via being a choice from GeneralName
            issuer = issuer.untag()
            authority_serial = self.authority_key_identifier_value['authority_cert_serial_number'].native
            self._authority_issuer_serial = issuer.sha256 + b':' + str_cls(authority_serial).encode('ascii')
        else:
            self._authority_issuer_serial = None
    return self._authority_issuer_serial
python
['def', 'authority_issuer_serial', '(', 'self', ')', ':', 'if', 'self', '.', '_authority_issuer_serial', 'is', 'False', ':', 'akiv', '=', 'self', '.', 'authority_key_identifier_value', 'if', 'akiv', 'and', 'akiv', '[', "'authority_cert_issuer'", ']', '.', 'native', ':', 'issuer', '=', 'self', '.', 'authority_key_identifier_value', '[', "'authority_cert_issuer'", ']', '[', '0', ']', '.', 'chosen', '# We untag the element since it is tagged via being a choice from GeneralName', 'issuer', '=', 'issuer', '.', 'untag', '(', ')', 'authority_serial', '=', 'self', '.', 'authority_key_identifier_value', '[', "'authority_cert_serial_number'", ']', '.', 'native', 'self', '.', '_authority_issuer_serial', '=', 'issuer', '.', 'sha256', '+', "b':'", '+', 'str_cls', '(', 'authority_serial', ')', '.', 'encode', '(', "'ascii'", ')', 'else', ':', 'self', '.', '_authority_issuer_serial', '=', 'None', 'return', 'self', '.', '_authority_issuer_serial']
:return: None or a byte string of the SHA-256 hash of the issuer from the authority key identifier extension concatenated with the ascii character ":", concatenated with the serial number from the authority key identifier extension as an ascii string
[':', 'return', ':', 'None', 'or', 'a', 'byte', 'string', 'of', 'the', 'SHA', '-', '256', 'hash', 'of', 'the', 'issuer', 'from', 'the', 'authority', 'key', 'identifier', 'extension', 'concatenated', 'with', 'the', 'ascii', 'character', ':', 'concatenated', 'with', 'the', 'serial', 'number', 'from', 'the', 'authority', 'key', 'identifier', 'extension', 'as', 'an', 'ascii', 'string']
train
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L2612-L2631
715
hobu/mgrs
mgrs/__init__.py
MGRS.ddtodms
def ddtodms(self, dd):
    """Take in dd string and convert to dms"""
    negative = dd < 0
    dd = abs(dd)
    minutes, seconds = divmod(dd * 3600, 60)
    degrees, minutes = divmod(minutes, 60)
    if negative:
        if degrees > 0:
            degrees = -degrees
        elif minutes > 0:
            minutes = -minutes
        else:
            seconds = -seconds
    return (degrees, minutes, seconds)
python
['def', 'ddtodms', '(', 'self', ',', 'dd', ')', ':', 'negative', '=', 'dd', '<', '0', 'dd', '=', 'abs', '(', 'dd', ')', 'minutes', ',', 'seconds', '=', 'divmod', '(', 'dd', '*', '3600', ',', '60', ')', 'degrees', ',', 'minutes', '=', 'divmod', '(', 'minutes', ',', '60', ')', 'if', 'negative', ':', 'if', 'degrees', '>', '0', ':', 'degrees', '=', '-', 'degrees', 'elif', 'minutes', '>', '0', ':', 'minutes', '=', '-', 'minutes', 'else', ':', 'seconds', '=', '-', 'seconds', 'return', '(', 'degrees', ',', 'minutes', ',', 'seconds', ')']
Take in dd string and convert to dms
['Take', 'in', 'dd', 'string', 'and', 'convert', 'to', 'dms']
train
https://github.com/hobu/mgrs/blob/759b3aba86779318854c73b8843ea956acb5eb3f/mgrs/__init__.py#L13-L26
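Note that dd is numeric despite the docstring's "string", and only the most significant nonzero component carries the sign. A worked example (values approximate, since divmod returns floats here):

import mgrs

m = mgrs.MGRS()
m.ddtodms(-73.9857)  # (-73.0, 59.0, ~8.52): 73.9857 * 3600 = 266348.52 arcseconds total
m.ddtodms(-0.5)      # (0.0, -30.0, 0.0): sign falls through to minutes when degrees == 0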
716
pyopenapi/pyswagger
pyswagger/spec/base.py
BaseObj.compare
def compare(self, other, base=None):
    """ comparison, will return the first difference """
    if self.__class__ != other.__class__:
        return False, ''

    names = self._field_names_

    def cmp_func(name, s, o):
        # special case for string types
        if isinstance(s, six.string_types) and isinstance(o, six.string_types):
            return s == o, name

        if s.__class__ != o.__class__:
            return False, name

        if isinstance(s, BaseObj):
            if not isinstance(s, weakref.ProxyTypes):
                return s.compare(o, name)
        elif isinstance(s, list):
            for i, v in zip(range(len(s)), s):
                same, n = cmp_func(jp_compose(str(i), name), v, o[i])
                if not same:
                    return same, n
        elif isinstance(s, dict):
            # compare if any key diff
            diff = list(set(s.keys()) - set(o.keys()))
            if diff:
                return False, jp_compose(str(diff[0]), name)
            diff = list(set(o.keys()) - set(s.keys()))
            if diff:
                return False, jp_compose(str(diff[0]), name)

            for k, v in six.iteritems(s):
                same, n = cmp_func(jp_compose(k, name), v, o[k])
                if not same:
                    return same, n
        else:
            return s == o, name

        return True, name

    for n in names:
        same, n = cmp_func(jp_compose(n, base), getattr(self, n), getattr(other, n))
        if not same:
            return same, n

    return True, ''
python
['def', 'compare', '(', 'self', ',', 'other', ',', 'base', '=', 'None', ')', ':', 'if', 'self', '.', '__class__', '!=', 'other', '.', '__class__', ':', 'return', 'False', ',', "''", 'names', '=', 'self', '.', '_field_names_', 'def', 'cmp_func', '(', 'name', ',', 's', ',', 'o', ')', ':', '# special case for string types', 'if', 'isinstance', '(', 's', ',', 'six', '.', 'string_types', ')', 'and', 'isinstance', '(', 'o', ',', 'six', '.', 'string_types', ')', ':', 'return', 's', '==', 'o', ',', 'name', 'if', 's', '.', '__class__', '!=', 'o', '.', '__class__', ':', 'return', 'False', ',', 'name', 'if', 'isinstance', '(', 's', ',', 'BaseObj', ')', ':', 'if', 'not', 'isinstance', '(', 's', ',', 'weakref', '.', 'ProxyTypes', ')', ':', 'return', 's', '.', 'compare', '(', 'o', ',', 'name', ')', 'elif', 'isinstance', '(', 's', ',', 'list', ')', ':', 'for', 'i', ',', 'v', 'in', 'zip', '(', 'range', '(', 'len', '(', 's', ')', ')', ',', 's', ')', ':', 'same', ',', 'n', '=', 'cmp_func', '(', 'jp_compose', '(', 'str', '(', 'i', ')', ',', 'name', ')', ',', 'v', ',', 'o', '[', 'i', ']', ')', 'if', 'not', 'same', ':', 'return', 'same', ',', 'n', 'elif', 'isinstance', '(', 's', ',', 'dict', ')', ':', '# compare if any key diff', 'diff', '=', 'list', '(', 'set', '(', 's', '.', 'keys', '(', ')', ')', '-', 'set', '(', 'o', '.', 'keys', '(', ')', ')', ')', 'if', 'diff', ':', 'return', 'False', ',', 'jp_compose', '(', 'str', '(', 'diff', '[', '0', ']', ')', ',', 'name', ')', 'diff', '=', 'list', '(', 'set', '(', 'o', '.', 'keys', '(', ')', ')', '-', 'set', '(', 's', '.', 'keys', '(', ')', ')', ')', 'if', 'diff', ':', 'return', 'False', ',', 'jp_compose', '(', 'str', '(', 'diff', '[', '0', ']', ')', ',', 'name', ')', 'for', 'k', ',', 'v', 'in', 'six', '.', 'iteritems', '(', 's', ')', ':', 'same', ',', 'n', '=', 'cmp_func', '(', 'jp_compose', '(', 'k', ',', 'name', ')', ',', 'v', ',', 'o', '[', 'k', ']', ')', 'if', 'not', 'same', ':', 'return', 'same', ',', 'n', 'else', ':', 'return', 's', '==', 'o', ',', 'name', 'return', 'True', ',', 'name', 'for', 'n', 'in', 'names', ':', 'same', ',', 'n', '=', 'cmp_func', '(', 'jp_compose', '(', 'n', ',', 'base', ')', ',', 'getattr', '(', 'self', ',', 'n', ')', ',', 'getattr', '(', 'other', ',', 'n', ')', ')', 'if', 'not', 'same', ':', 'return', 'same', ',', 'n', 'return', 'True', ',', "''"]
comparison, will return the first difference
['comparison', 'will', 'return', 'the', 'first', 'difference']
train
https://github.com/pyopenapi/pyswagger/blob/333c4ca08e758cd2194943d9904a3eda3fe43977/pyswagger/spec/base.py#L345-L391
717
tanghaibao/goatools
goatools/anno/dnld_ebi_goa.py
DnldGoa.dnld_goa
def dnld_goa(self, species, ext='gaf', item=None, fileout=None):
    """Download GOA source file name on EMBL-EBI ftp server."""
    basename = self.get_basename(species, ext, item)
    src = os.path.join(self.ftp_src_goa, species.upper(), "{F}.gz".format(F=basename))
    dst = os.path.join(os.getcwd(), basename) if fileout is None else fileout
    dnld_file(src, dst, prt=sys.stdout, loading_bar=None)
    return dst
python
['def', 'dnld_goa', '(', 'self', ',', 'species', ',', 'ext', '=', "'gaf'", ',', 'item', '=', 'None', ',', 'fileout', '=', 'None', ')', ':', 'basename', '=', 'self', '.', 'get_basename', '(', 'species', ',', 'ext', ',', 'item', ')', 'src', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'ftp_src_goa', ',', 'species', '.', 'upper', '(', ')', ',', '"{F}.gz"', '.', 'format', '(', 'F', '=', 'basename', ')', ')', 'dst', '=', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'getcwd', '(', ')', ',', 'basename', ')', 'if', 'fileout', 'is', 'None', 'else', 'fileout', 'dnld_file', '(', 'src', ',', 'dst', ',', 'prt', '=', 'sys', '.', 'stdout', ',', 'loading_bar', '=', 'None', ')', 'return', 'dst']
Download GOA source file name on EMBL-EBI ftp server.
['Download', 'GOA', 'source', 'file', 'name', 'on', 'EMBL', '-', 'EBI', 'ftp', 'server', '.']
train
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/dnld_ebi_goa.py#L41-L47
718
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/reftrack/asset.py
AssetReftypeInterface.get_scenenode
def get_scenenode(self, nodes):
    """Get the scenenode in the given nodes

    There should only be one scenenode in nodes!

    :param nodes:
    :type nodes:
    :returns: the scene node
    :rtype: str
    :raises: AssertionError
    """
    scenenodes = cmds.ls(nodes, type='jb_sceneNode')
    assert scenenodes, "Found no scene nodes!"
    return sorted(scenenodes)[0]
python
['def', 'get_scenenode', '(', 'self', ',', 'nodes', ')', ':', 'scenenodes', '=', 'cmds', '.', 'ls', '(', 'nodes', ',', 'type', '=', "'jb_sceneNode'", ')', 'assert', 'scenenodes', ',', '"Found no scene nodes!"', 'return', 'sorted', '(', 'scenenodes', ')', '[', '0', ']']
Get the scenenode in the given nodes There should only be one scenenode in nodes! :param nodes: :type nodes: :returns: the scene node :rtype: str :raises: AssertionError
['Get', 'the', 'scenenode', 'in', 'the', 'given', 'nodes']
train
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/reftrack/asset.py#L85-L98
719
theonion/django-bulbs
bulbs/sections/models.py
Section.get_content
def get_content(self):
    """performs es search and gets content objects """
    if "query" in self.query:
        q = self.query["query"]
    else:
        q = self.query

    search = custom_search_model(Content, q, field_map={
        "feature-type": "feature_type.slug",
        "tag": "tags.slug",
        "content-type": "_type",
    })
    return search
python
['def', 'get_content', '(', 'self', ')', ':', 'if', '"query"', 'in', 'self', '.', 'query', ':', 'q', '=', 'self', '.', 'query', '[', '"query"', ']', 'else', ':', 'q', '=', 'self', '.', 'query', 'search', '=', 'custom_search_model', '(', 'Content', ',', 'q', ',', 'field_map', '=', '{', '"feature-type"', ':', '"feature_type.slug"', ',', '"tag"', ':', '"tags.slug"', ',', '"content-type"', ':', '"_type"', ',', '}', ')', 'return', 'search']
performs es search and gets content objects
['performs', 'es', 'search', 'and', 'gets', 'content', 'objects']
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/sections/models.py#L81-L93
720
apache/spark
python/pyspark/sql/dataframe.py
DataFrame.unpersist
def unpersist(self, blocking=False):
    """Marks the :class:`DataFrame` as non-persistent, and removes all blocks for it from
    memory and disk.

    .. note:: `blocking` default has changed to False to match Scala in 2.0.
    """
    self.is_cached = False
    self._jdf.unpersist(blocking)
    return self
python
['def', 'unpersist', '(', 'self', ',', 'blocking', '=', 'False', ')', ':', 'self', '.', 'is_cached', '=', 'False', 'self', '.', '_jdf', '.', 'unpersist', '(', 'blocking', ')', 'return', 'self']
Marks the :class:`DataFrame` as non-persistent, and removes all blocks for it from memory and disk. .. note:: `blocking` default has changed to False to match Scala in 2.0.
['Marks', 'the', ':', 'class', ':', 'DataFrame', 'as', 'non', '-', 'persistent', 'and', 'removes', 'all', 'blocks', 'for', 'it', 'from', 'memory', 'and', 'disk', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L626-L634
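Usage is symmetric with cache()/persist(); a small sketch assuming an active SparkSession named spark:

df = spark.range(10)
df.cache()       # mark for caching at the default storage level
df.count()       # an action materializes the cache
df.unpersist()   # non-blocking by default, matching Scala since 2.0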
721
shaldengeki/python-mal
myanimelist/character.py
Character.parse_pictures
def parse_pictures(self, picture_page):
    """Parses the DOM and returns character pictures attributes.

    :type picture_page: :class:`bs4.BeautifulSoup`
    :param picture_page: MAL character pictures page's DOM

    :rtype: dict
    :return: character pictures attributes.

    """
    character_info = self.parse_sidebar(picture_page)
    second_col = picture_page.find(u'div', {'id': 'content'}).find(u'table').find(u'tr').find_all(u'td', recursive=False)[1]

    try:
        picture_table = second_col.find(u'table', recursive=False)
        character_info[u'pictures'] = []
        if picture_table:
            character_info[u'pictures'] = map(lambda img: img.get(u'src').decode('utf-8'), picture_table.find_all(u'img'))
    except:
        if not self.session.suppress_parse_exceptions:
            raise

    return character_info
python
['def', 'parse_pictures', '(', 'self', ',', 'picture_page', ')', ':', 'character_info', '=', 'self', '.', 'parse_sidebar', '(', 'picture_page', ')', 'second_col', '=', 'picture_page', '.', 'find', '(', "u'div'", ',', '{', "'id'", ':', "'content'", '}', ')', '.', 'find', '(', "u'table'", ')', '.', 'find', '(', "u'tr'", ')', '.', 'find_all', '(', "u'td'", ',', 'recursive', '=', 'False', ')', '[', '1', ']', 'try', ':', 'picture_table', '=', 'second_col', '.', 'find', '(', "u'table'", ',', 'recursive', '=', 'False', ')', 'character_info', '[', "u'pictures'", ']', '=', '[', ']', 'if', 'picture_table', ':', 'character_info', '[', "u'pictures'", ']', '=', 'map', '(', 'lambda', 'img', ':', 'img', '.', 'get', '(', "u'src'", ')', '.', 'decode', '(', "'utf-8'", ')', ',', 'picture_table', '.', 'find_all', '(', "u'img'", ')', ')', 'except', ':', 'if', 'not', 'self', '.', 'session', '.', 'suppress_parse_exceptions', ':', 'raise', 'return', 'character_info']
Parses the DOM and returns character pictures attributes. :type picture_page: :class:`bs4.BeautifulSoup` :param picture_page: MAL character pictures page's DOM :rtype: dict :return: character pictures attributes.
['Parses', 'the', 'DOM', 'and', 'returns', 'character', 'pictures', 'attributes', '.']
train
https://github.com/shaldengeki/python-mal/blob/2c3356411a74d88ba13f6b970388040d696f8392/myanimelist/character.py#L226-L248
722
underworldcode/stripy
stripy-src/stripy/cartesian.py
Triangulation.neighbour_and_arc_simplices
def neighbour_and_arc_simplices(self):
    """
    Get indices of neighbour simplices for each simplex and arc indices.
    Identical to get_neighbour_simplices() but also returns an array
    of indices that reside on boundary hull, -1 denotes no neighbour.
    """
    nt, ltri, lct, ierr = _tripack.trlist(self.lst, self.lptr, self.lend, nrow=9)
    if ierr != 0:
        raise ValueError('ierr={} in trlist\n{}'.format(ierr, _ier_codes[ierr]))
    ltri = ltri.T[:nt] - 1
    return ltri[:,3:6], ltri[:,6:]
python
['def', 'neighbour_and_arc_simplices', '(', 'self', ')', ':', 'nt', ',', 'ltri', ',', 'lct', ',', 'ierr', '=', '_tripack', '.', 'trlist', '(', 'self', '.', 'lst', ',', 'self', '.', 'lptr', ',', 'self', '.', 'lend', ',', 'nrow', '=', '9', ')', 'if', 'ierr', '!=', '0', ':', 'raise', 'ValueError', '(', "'ierr={} in trlist\\n{}'", '.', 'format', '(', 'ierr', ',', '_ier_codes', '[', 'ierr', ']', ')', ')', 'ltri', '=', 'ltri', '.', 'T', '[', ':', 'nt', ']', '-', '1', 'return', 'ltri', '[', ':', ',', '3', ':', '6', ']', ',', 'ltri', '[', ':', ',', '6', ':', ']']
Get indices of neighbour simplices for each simplex and arc indices. Identical to get_neighbour_simplices() but also returns an array of indices that reside on boundary hull, -1 denotes no neighbour.
['Get', 'indices', 'of', 'neighbour', 'simplices', 'for', 'each', 'simplex', 'and', 'arc', 'indices', '.', 'Identical', 'to', 'get_neighbour_simplices', '()', 'but', 'also', 'returns', 'an', 'array', 'of', 'indices', 'that', 'reside', 'on', 'boundary', 'hull', '-', '1', 'denotes', 'no', 'neighbour', '.']
train
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L590-L600
723
SBRG/ssbio
ssbio/protein/structure/structprop.py
StructProp.get_msms_annotations
def get_msms_annotations(self, outdir, force_rerun=False):
    """Run MSMS on this structure and store the residue depths/ca depths in the
    corresponding ChainProp SeqRecords
    """
    # Now can run on Biopython Model objects exclusively thanks to Biopython updates
    # if self.file_type != 'pdb':
    #     raise ValueError('{}: unable to run MSMS with "{}" file type. Please change file type to "pdb"'.format(self.id,
    #                                                                                                            self.file_type))
    if self.structure:
        parsed = self.structure
    else:
        parsed = self.parse_structure()
    if not parsed:
        log.error('{}: unable to open structure to run MSMS'.format(self.id))
        return

    log.debug('{}: running MSMS'.format(self.id))
    # PDB ID is currently set to the structure file so the output name is the same with _msms.df appended to it
    msms_results = ssbio.protein.structure.properties.msms.get_msms_df(model=parsed.first_model,
                                                                       pdb_id=self.structure_path,
                                                                       outdir=outdir,
                                                                       force_rerun=force_rerun)
    if msms_results.empty:
        log.error('{}: unable to run MSMS'.format(self.id))
        return

    chains = msms_results.chain.unique()
    for chain in chains:
        res_depths = msms_results[msms_results.chain == chain].res_depth.tolist()
        ca_depths = msms_results[msms_results.chain == chain].ca_depth.tolist()
        chain_prop = self.chains.get_by_id(chain)
        chain_seq = chain_prop.seq_record
        # Making sure the X's are filled in
        res_depths = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq,
                                                                                          new_seq=res_depths,
                                                                                          fill_with=float('Inf'))
        ca_depths = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain_seq,
                                                                                         new_seq=ca_depths,
                                                                                         fill_with=float('Inf'))
        chain_prop.seq_record.letter_annotations['RES_DEPTH-msms'] = res_depths
        chain_prop.seq_record.letter_annotations['CA_DEPTH-msms'] = ca_depths
        log.debug('{}: stored residue depths in chain seq_record letter_annotations'.format(chain))
python
['def', 'get_msms_annotations', '(', 'self', ',', 'outdir', ',', 'force_rerun', '=', 'False', ')', ':', '# Now can run on Biopython Model objects exclusively thanks to Biopython updates', "# if self.file_type != 'pdb':", '# raise ValueError(\'{}: unable to run MSMS with "{}" file type. Please change file type to "pdb"\'.format(self.id,', '# self.file_type))', 'if', 'self', '.', 'structure', ':', 'parsed', '=', 'self', '.', 'structure', 'else', ':', 'parsed', '=', 'self', '.', 'parse_structure', '(', ')', 'if', 'not', 'parsed', ':', 'log', '.', 'error', '(', "'{}: unable to open structure to run MSMS'", '.', 'format', '(', 'self', '.', 'id', ')', ')', 'return', 'log', '.', 'debug', '(', "'{}: running MSMS'", '.', 'format', '(', 'self', '.', 'id', ')', ')', '# PDB ID is currently set to the structure file so the output name is the same with _msms.df appended to it', 'msms_results', '=', 'ssbio', '.', 'protein', '.', 'structure', '.', 'properties', '.', 'msms', '.', 'get_msms_df', '(', 'model', '=', 'parsed', '.', 'first_model', ',', 'pdb_id', '=', 'self', '.', 'structure_path', ',', 'outdir', '=', 'outdir', ',', 'force_rerun', '=', 'force_rerun', ')', 'if', 'msms_results', '.', 'empty', ':', 'log', '.', 'error', '(', "'{}: unable to run MSMS'", '.', 'format', '(', 'self', '.', 'id', ')', ')', 'return', 'chains', '=', 'msms_results', '.', 'chain', '.', 'unique', '(', ')', 'for', 'chain', 'in', 'chains', ':', 'res_depths', '=', 'msms_results', '[', 'msms_results', '.', 'chain', '==', 'chain', ']', '.', 'res_depth', '.', 'tolist', '(', ')', 'ca_depths', '=', 'msms_results', '[', 'msms_results', '.', 'chain', '==', 'chain', ']', '.', 'ca_depth', '.', 'tolist', '(', ')', 'chain_prop', '=', 'self', '.', 'chains', '.', 'get_by_id', '(', 'chain', ')', 'chain_seq', '=', 'chain_prop', '.', 'seq_record', "# Making sure the X's are filled in", 'res_depths', '=', 'ssbio', '.', 'protein', '.', 'structure', '.', 'properties', '.', 'residues', '.', 'match_structure_sequence', '(', 'orig_seq', '=', 'chain_seq', ',', 'new_seq', '=', 'res_depths', ',', 'fill_with', '=', 'float', '(', "'Inf'", ')', ')', 'ca_depths', '=', 'ssbio', '.', 'protein', '.', 'structure', '.', 'properties', '.', 'residues', '.', 'match_structure_sequence', '(', 'orig_seq', '=', 'chain_seq', ',', 'new_seq', '=', 'ca_depths', ',', 'fill_with', '=', 'float', '(', "'Inf'", ')', ')', 'chain_prop', '.', 'seq_record', '.', 'letter_annotations', '[', "'RES_DEPTH-msms'", ']', '=', 'res_depths', 'chain_prop', '.', 'seq_record', '.', 'letter_annotations', '[', "'CA_DEPTH-msms'", ']', '=', 'ca_depths', 'log', '.', 'debug', '(', "'{}: stored residue depths in chain seq_record letter_annotations'", '.', 'format', '(', 'chain', ')', ')']
Run MSMS on this structure and store the residue depths/ca depths in the corresponding ChainProp SeqRecords
['Run', 'MSMS', 'on', 'this', 'structure', 'and', 'store', 'the', 'residue', 'depths', '/', 'ca', 'depths', 'in', 'the', 'corresponding', 'ChainProp', 'SeqRecords']
train
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/structprop.py#L501-L546
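A minimal, self-contained sketch of the storage pattern used in the record above: per-residue values attached to a Biopython SeqRecord through letter_annotations, with uncovered positions padded the same way the fill_with=float('Inf') calls do. The sequence and depth values are invented.

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

record = SeqRecord(Seq('MKTAY'), id='chainA')
res_depths = [1.2, 3.4, float('inf'), 2.1, 0.9]            # one value per residue
record.letter_annotations['RES_DEPTH-msms'] = res_depths   # length must equal len(seq)
print(record.letter_annotations['RES_DEPTH-msms'])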
724
acorg/dark-matter
dark/process.py
Executor.execute
def execute(self, command): """ Execute (or simulate) a command. Add to our log. @param command: Either a C{str} command (which will be passed to the shell) or a C{list} of command arguments (including the executable name), in which case the shell is not used. @return: A C{CompletedProcess} instance. This has attributes such as C{returncode}, C{stdout}, and C{stderr}. See pydoc subprocess. """ if isinstance(command, six.string_types): # Can't have newlines in a command given to the shell. strCommand = command = command.replace('\n', ' ').strip() shell = True else: strCommand = ' '.join(command) shell = False if self._dryRun: self.log.append('$ ' + strCommand) return start = time() self.log.extend([ '# Start command (shell=%s) at %s' % (shell, ctime(start)), '$ ' + strCommand, ]) if six.PY3: try: result = run(command, check=True, stdout=PIPE, stderr=PIPE, shell=shell, universal_newlines=True) except CalledProcessError as e: from sys import stderr print('CalledProcessError:', e, file=stderr) print('STDOUT:\n%s' % e.stdout, file=stderr) print('STDERR:\n%s' % e.stderr, file=stderr) raise else: try: result = check_call(command, stdout=PIPE, stderr=PIPE, shell=shell, universal_newlines=True) except CalledProcessError as e: from sys import stderr print('CalledProcessError:', e, file=stderr) print('Return code: %s' % e.returncode, file=stderr) print('Output:\n%s' % e.output, file=stderr) raise stop = time() elapsed = (stop - start) self.log.extend([ '# Stop command at %s' % ctime(stop), '# Elapsed = %f seconds' % elapsed, ]) return result
python
def execute(self, command): """ Execute (or simulate) a command. Add to our log. @param command: Either a C{str} command (which will be passed to the shell) or a C{list} of command arguments (including the executable name), in which case the shell is not used. @return: A C{CompletedProcess} instance. This has attributes such as C{returncode}, C{stdout}, and C{stderr}. See pydoc subprocess. """ if isinstance(command, six.string_types): # Can't have newlines in a command given to the shell. strCommand = command = command.replace('\n', ' ').strip() shell = True else: strCommand = ' '.join(command) shell = False if self._dryRun: self.log.append('$ ' + strCommand) return start = time() self.log.extend([ '# Start command (shell=%s) at %s' % (shell, ctime(start)), '$ ' + strCommand, ]) if six.PY3: try: result = run(command, check=True, stdout=PIPE, stderr=PIPE, shell=shell, universal_newlines=True) except CalledProcessError as e: from sys import stderr print('CalledProcessError:', e, file=stderr) print('STDOUT:\n%s' % e.stdout, file=stderr) print('STDERR:\n%s' % e.stderr, file=stderr) raise else: try: result = check_call(command, stdout=PIPE, stderr=PIPE, shell=shell, universal_newlines=True) except CalledProcessError as e: from sys import stderr print('CalledProcessError:', e, file=stderr) print('Return code: %s' % e.returncode, file=stderr) print('Output:\n%s' % e.output, file=stderr) raise stop = time() elapsed = (stop - start) self.log.extend([ '# Stop command at %s' % ctime(stop), '# Elapsed = %f seconds' % elapsed, ]) return result
['def', 'execute', '(', 'self', ',', 'command', ')', ':', 'if', 'isinstance', '(', 'command', ',', 'six', '.', 'string_types', ')', ':', "# Can't have newlines in a command given to the shell.", 'strCommand', '=', 'command', '=', 'command', '.', 'replace', '(', "'\\n'", ',', "' '", ')', '.', 'strip', '(', ')', 'shell', '=', 'True', 'else', ':', 'strCommand', '=', "' '", '.', 'join', '(', 'command', ')', 'shell', '=', 'False', 'if', 'self', '.', '_dryRun', ':', 'self', '.', 'log', '.', 'append', '(', "'$ '", '+', 'strCommand', ')', 'return', 'start', '=', 'time', '(', ')', 'self', '.', 'log', '.', 'extend', '(', '[', "'# Start command (shell=%s) at %s'", '%', '(', 'shell', ',', 'ctime', '(', 'start', ')', ')', ',', "'$ '", '+', 'strCommand', ',', ']', ')', 'if', 'six', '.', 'PY3', ':', 'try', ':', 'result', '=', 'run', '(', 'command', ',', 'check', '=', 'True', ',', 'stdout', '=', 'PIPE', ',', 'stderr', '=', 'PIPE', ',', 'shell', '=', 'shell', ',', 'universal_newlines', '=', 'True', ')', 'except', 'CalledProcessError', 'as', 'e', ':', 'from', 'sys', 'import', 'stderr', 'print', '(', "'CalledProcessError:'", ',', 'e', ',', 'file', '=', 'stderr', ')', 'print', '(', "'STDOUT:\\n%s'", '%', 'e', '.', 'stdout', ',', 'file', '=', 'stderr', ')', 'print', '(', "'STDERR:\\n%s'", '%', 'e', '.', 'stderr', ',', 'file', '=', 'stderr', ')', 'raise', 'else', ':', 'try', ':', 'result', '=', 'check_call', '(', 'command', ',', 'stdout', '=', 'PIPE', ',', 'stderr', '=', 'PIPE', ',', 'shell', '=', 'shell', ',', 'universal_newlines', '=', 'True', ')', 'except', 'CalledProcessError', 'as', 'e', ':', 'from', 'sys', 'import', 'stderr', 'print', '(', "'CalledProcessError:'", ',', 'e', ',', 'file', '=', 'stderr', ')', 'print', '(', "'Return code: %s'", '%', 'e', '.', 'returncode', ',', 'file', '=', 'stderr', ')', 'print', '(', "'Output:\\n%s'", '%', 'e', '.', 'output', ',', 'file', '=', 'stderr', ')', 'raise', 'stop', '=', 'time', '(', ')', 'elapsed', '=', '(', 'stop', '-', 'start', ')', 'self', '.', 'log', '.', 'extend', '(', '[', "'# Stop command at %s'", '%', 'ctime', '(', 'stop', ')', ',', "'# Elapsed = %f seconds'", '%', 'elapsed', ',', ']', ')', 'return', 'result']
Execute (or simulate) a command. Add to our log. @param command: Either a C{str} command (which will be passed to the shell) or a C{list} of command arguments (including the executable name), in which case the shell is not used. @return: A C{CompletedProcess} instance. This has attributes such as C{returncode}, C{stdout}, and C{stderr}. See pydoc subprocess.
['Execute', '(', 'or', 'simulate', ')', 'a', 'command', '.', 'Add', 'to', 'our', 'log', '.']
train
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/process.py#L23-L79
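The key dispatch in execute — a string command goes through the shell, a list of arguments is executed directly — can be reduced to a few lines. This is a simplified sketch using Python 3's subprocess only, without the class's logging and six-based branching:

import subprocess

def run_command(command):
    # String -> shell=True (after newline stripping, as in execute);
    # list of arguments -> shell=False.
    shell = isinstance(command, str)
    if shell:
        command = command.replace('\n', ' ').strip()
    return subprocess.run(command, check=True, capture_output=True,
                          text=True, shell=shell)

print(run_command(['echo', 'hello']).stdout)   # -> hello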
725
brentpayne/phrase
phrase/noun_phrase_dictionary.py
NounPhraseDictionary.convert_noun_phrases
def convert_noun_phrases(self, id_run, pos_run): """ Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases :param id_run: a run of token ids :param dictionary: a dictionary of acceptable phrases described as there component token ids :return: a run of token and phrase ids. """ i = 0 rv = [] while i < len(id_run): phrase_id, offset = PhraseDictionary.return_max_phrase(id_run, i, self) if phrase_id: if pos_run[i] in ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS', 'SYM', 'CD', 'VBG', 'FW', 'NP'): print "MERGED", pos_run[i], self.get_phrase(phrase_id) rv.append((phrase_id,'NP')) i = offset else: print "SKIPPED", pos_run[i], self.get_phrase(phrase_id) rv.append((id_run[i], pos_run[i])) i += 1 else: rv.append((id_run[i], pos_run[i])) i += 1 return rv
python
def convert_noun_phrases(self, id_run, pos_run): """ Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases :param id_run: a run of token ids :param dictionary: a dictionary of acceptable phrases described as there component token ids :return: a run of token and phrase ids. """ i = 0 rv = [] while i < len(id_run): phrase_id, offset = PhraseDictionary.return_max_phrase(id_run, i, self) if phrase_id: if pos_run[i] in ('JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS', 'SYM', 'CD', 'VBG', 'FW', 'NP'): print "MERGED", pos_run[i], self.get_phrase(phrase_id) rv.append((phrase_id,'NP')) i = offset else: print "SKIPPED", pos_run[i], self.get_phrase(phrase_id) rv.append((id_run[i], pos_run[i])) i += 1 else: rv.append((id_run[i], pos_run[i])) i += 1 return rv
['def', 'convert_noun_phrases', '(', 'self', ',', 'id_run', ',', 'pos_run', ')', ':', 'i', '=', '0', 'rv', '=', '[', ']', 'while', 'i', '<', 'len', '(', 'id_run', ')', ':', 'phrase_id', ',', 'offset', '=', 'PhraseDictionary', '.', 'return_max_phrase', '(', 'id_run', ',', 'i', ',', 'self', ')', 'if', 'phrase_id', ':', 'if', 'pos_run', '[', 'i', ']', 'in', '(', "'JJ'", ',', "'JJR'", ',', "'JJS'", ',', "'NN'", ',', "'NNS'", ',', "'NNP'", ',', "'NNPS'", ',', "'SYM'", ',', "'CD'", ',', "'VBG'", ',', "'FW'", ',', "'NP'", ')', ':', 'print', '"MERGED"', ',', 'pos_run', '[', 'i', ']', ',', 'self', '.', 'get_phrase', '(', 'phrase_id', ')', 'rv', '.', 'append', '(', '(', 'phrase_id', ',', "'NP'", ')', ')', 'i', '=', 'offset', 'else', ':', 'print', '"SKIPPED"', ',', 'pos_run', '[', 'i', ']', ',', 'self', '.', 'get_phrase', '(', 'phrase_id', ')', 'rv', '.', 'append', '(', '(', 'id_run', '[', 'i', ']', ',', 'pos_run', '[', 'i', ']', ')', ')', 'i', '+=', '1', 'else', ':', 'rv', '.', 'append', '(', '(', 'id_run', '[', 'i', ']', ',', 'pos_run', '[', 'i', ']', ')', ')', 'i', '+=', '1', 'return', 'rv']
Converts any identified phrases in the run into phrase_ids. The dictionary provides all acceptable phrases :param id_run: a run of token ids :param dictionary: a dictionary of acceptable phrases described as their component token ids :return: a run of token and phrase ids.
['Converts', 'any', 'identified', 'phrases', 'in', 'the', 'run', 'into', 'phrase_ids', '.', 'The', 'dictionary', 'provides', 'all', 'acceptable', 'phrases', ':', 'param', 'id_run', ':', 'a', 'run', 'of', 'token', 'ids', ':', 'param', 'dictionary', ':', 'a', 'dictionary', 'of', 'acceptable', 'phrases', 'described', 'as', 'their', 'component', 'token', 'ids', ':', 'return', ':', 'a', 'run', 'of', 'token', 'and', 'phrase', 'ids', '.']
train
https://github.com/brentpayne/phrase/blob/2c25e202eff0f284cb724a36cec1b22a1169e7a2/phrase/noun_phrase_dictionary.py#L31-L54
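Note that this record's code is Python 2 (bare print statements). The greedy longest-match merge that return_max_phrase apparently performs can be sketched in a self-contained Python 3 form; the tokens and phrase set below are invented:

def merge_longest_phrases(tokens, phrases):
    out, i = [], 0
    while i < len(tokens):
        match_end = None
        for j in range(len(tokens), i, -1):       # longest span first
            if tuple(tokens[i:j]) in phrases:
                match_end = j
                break
        if match_end is not None:
            out.append(' '.join(tokens[i:match_end]))
            i = match_end
        else:
            out.append(tokens[i])
            i += 1
    return out

print(merge_longest_phrases(['new', 'york', 'city', 'taxi'],
                            {('new', 'york'), ('new', 'york', 'city')}))
# -> ['new york city', 'taxi']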
726
praekeltfoundation/molo.yourtips
molo/yourtips/templatetags/tip_tags.py
your_tips_on_tip_submission_form
def your_tips_on_tip_submission_form(context): """ A template tag to display the most recent and popular tip on the tip submission form. :param context: takes context """ context = copy(context) site_main = context['request'].site.root_page most_recent_tip = (YourTipsArticlePage.objects .descendant_of(site_main) .order_by('-latest_revision_created_at') .first()) most_popular_tip = (YourTipsArticlePage.objects .descendant_of(site_main) .filter(votes__gte=1) .order_by('-total_upvotes') .first()) context.update({ 'most_popular_tip': most_popular_tip, 'most_recent_tip': most_recent_tip, 'your_tip_page_slug': get_your_tip(context).slug }) return context
python
def your_tips_on_tip_submission_form(context): """ A template tag to display the most recent and popular tip on the tip submission form. :param context: takes context """ context = copy(context) site_main = context['request'].site.root_page most_recent_tip = (YourTipsArticlePage.objects .descendant_of(site_main) .order_by('-latest_revision_created_at') .first()) most_popular_tip = (YourTipsArticlePage.objects .descendant_of(site_main) .filter(votes__gte=1) .order_by('-total_upvotes') .first()) context.update({ 'most_popular_tip': most_popular_tip, 'most_recent_tip': most_recent_tip, 'your_tip_page_slug': get_your_tip(context).slug }) return context
['def', 'your_tips_on_tip_submission_form', '(', 'context', ')', ':', 'context', '=', 'copy', '(', 'context', ')', 'site_main', '=', 'context', '[', "'request'", ']', '.', 'site', '.', 'root_page', 'most_recent_tip', '=', '(', 'YourTipsArticlePage', '.', 'objects', '.', 'descendant_of', '(', 'site_main', ')', '.', 'order_by', '(', "'-latest_revision_created_at'", ')', '.', 'first', '(', ')', ')', 'most_popular_tip', '=', '(', 'YourTipsArticlePage', '.', 'objects', '.', 'descendant_of', '(', 'site_main', ')', '.', 'filter', '(', 'votes__gte', '=', '1', ')', '.', 'order_by', '(', "'-total_upvotes'", ')', '.', 'first', '(', ')', ')', 'context', '.', 'update', '(', '{', "'most_popular_tip'", ':', 'most_popular_tip', ',', "'most_recent_tip'", ':', 'most_recent_tip', ',', "'your_tip_page_slug'", ':', 'get_your_tip', '(', 'context', ')', '.', 'slug', '}', ')', 'return', 'context']
A template tag to display the most recent and popular tip on the tip submission form. :param context: takes context
['A', 'template', 'tag', 'to', 'display', 'the', 'most', 'recent', 'and', 'popular', 'tip', 'on', 'the', 'tip', 'submission', 'form', '.', ':', 'param', 'context', ':', 'takes', 'context']
train
https://github.com/praekeltfoundation/molo.yourtips/blob/8b3e3b1ff52cd4a78ccca5d153b3909a1f21625f/molo/yourtips/templatetags/tip_tags.py#L55-L80
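The intent of the two queryset chains — newest tip by revision date, most-voted tip among those with at least one vote — reduced to plain Python so it runs without Django; the stub type and sample data are made up:

from collections import namedtuple

Tip = namedtuple('Tip', 'title created votes')
tips = [Tip('Save power', 3, 0), Tip('Boil once', 1, 7), Tip('Unplug it', 2, 2)]

most_recent = max(tips, key=lambda t: t.created)       # order_by('-latest_revision_created_at').first()
most_popular = max((t for t in tips if t.votes >= 1),  # filter(votes__gte=1)
                   key=lambda t: t.votes)              # order_by('-total_upvotes').first()
print(most_recent.title, '|', most_popular.title)      # Save power | Boil once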
727
libtcod/python-tcod
tcod/tileset.py
load_truetype_font
def load_truetype_font( path: str, tile_width: int, tile_height: int ) -> Tileset: """Return a new Tileset from a `.ttf` or `.otf` file. Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead. You can send this Tileset to :any:`set_default`. This function is provisional. The API may change. """ if not os.path.exists(path): raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),)) return Tileset._claim( lib.TCOD_load_truetype_font_(path.encode(), tile_width, tile_height) )
python
def load_truetype_font( path: str, tile_width: int, tile_height: int ) -> Tileset: """Return a new Tileset from a `.ttf` or `.otf` file. Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead. You can send this Tileset to :any:`set_default`. This function is provisional. The API may change. """ if not os.path.exists(path): raise RuntimeError("File not found:\n\t%s" % (os.path.realpath(path),)) return Tileset._claim( lib.TCOD_load_truetype_font_(path.encode(), tile_width, tile_height) )
['def', 'load_truetype_font', '(', 'path', ':', 'str', ',', 'tile_width', ':', 'int', ',', 'tile_height', ':', 'int', ')', '->', 'Tileset', ':', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'path', ')', ':', 'raise', 'RuntimeError', '(', '"File not found:\\n\\t%s"', '%', '(', 'os', '.', 'path', '.', 'realpath', '(', 'path', ')', ',', ')', ')', 'return', 'Tileset', '.', '_claim', '(', 'lib', '.', 'TCOD_load_truetype_font_', '(', 'path', '.', 'encode', '(', ')', ',', 'tile_width', ',', 'tile_height', ')', ')']
Return a new Tileset from a `.ttf` or `.otf` file. Same as :any:`set_truetype_font`, but returns a :any:`Tileset` instead. You can send this Tileset to :any:`set_default`. This function is provisional. The API may change.
['Return', 'a', 'new', 'Tileset', 'from', 'a', '.', 'ttf', 'or', '.', 'otf', 'file', '.']
train
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/tileset.py#L120-L134
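A hedged usage sketch following the docstring's own suggestion to send the result to set_default; the font path is a placeholder:

import tcod.tileset

tileset = tcod.tileset.load_truetype_font('DejaVuSansMono.ttf', 16, 16)
tcod.tileset.set_default(tileset)   # provisional API, as the docstring warns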
728
secynic/ipwhois
ipwhois/scripts/ipwhois_cli.py
IPWhoisCLI.generate_output_events
def generate_output_events(self, source, key, val, line='2', hr=True, show_name=False, colorize=True): """ The function for generating CLI output RDAP events results. Args: source (:obj:`str`): The parent key 'network' or 'objects' (required). key (:obj:`str`): The event key 'events' or 'events_actor' (required). val (:obj:`dict`): The event dictionary (required). line (:obj:`str`): The line number (0-4). Determines indentation. Defaults to '0'. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output. """ output = generate_output( line=line, short=HR_RDAP[source][key]['_short'] if hr else key, name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None, is_parent=False if (val is None or len(val) == 0) else True, value='None' if (val is None or len(val) == 0) else None, colorize=colorize ) if val is not None: count = 0 for item in val: try: action = item['action'] except KeyError: action = None try: timestamp = item['timestamp'] except KeyError: timestamp = None try: actor = item['actor'] except KeyError: actor = None if count > 0: output += generate_output( line=str(int(line)+1), is_parent=True, colorize=colorize ) output += generate_output( line=str(int(line)+1), short=HR_RDAP_COMMON[key]['action'][ '_short'] if hr else 'action', name=HR_RDAP_COMMON[key]['action'][ '_name'] if (hr and show_name) else None, value=action, colorize=colorize ) output += generate_output( line=str(int(line)+1), short=HR_RDAP_COMMON[key]['timestamp'][ '_short'] if hr else 'timestamp', name=HR_RDAP_COMMON[key]['timestamp'][ '_name'] if (hr and show_name) else None, value=timestamp, colorize=colorize ) output += generate_output( line=str(int(line)+1), short=HR_RDAP_COMMON[key]['actor'][ '_short'] if hr else 'actor', name=HR_RDAP_COMMON[key]['actor'][ '_name'] if (hr and show_name) else None, value=actor, colorize=colorize ) count += 1 return output
python
def generate_output_events(self, source, key, val, line='2', hr=True, show_name=False, colorize=True): """ The function for generating CLI output RDAP events results. Args: source (:obj:`str`): The parent key 'network' or 'objects' (required). key (:obj:`str`): The event key 'events' or 'events_actor' (required). val (:obj:`dict`): The event dictionary (required). line (:obj:`str`): The line number (0-4). Determines indentation. Defaults to '0'. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output. """ output = generate_output( line=line, short=HR_RDAP[source][key]['_short'] if hr else key, name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None, is_parent=False if (val is None or len(val) == 0) else True, value='None' if (val is None or len(val) == 0) else None, colorize=colorize ) if val is not None: count = 0 for item in val: try: action = item['action'] except KeyError: action = None try: timestamp = item['timestamp'] except KeyError: timestamp = None try: actor = item['actor'] except KeyError: actor = None if count > 0: output += generate_output( line=str(int(line)+1), is_parent=True, colorize=colorize ) output += generate_output( line=str(int(line)+1), short=HR_RDAP_COMMON[key]['action'][ '_short'] if hr else 'action', name=HR_RDAP_COMMON[key]['action'][ '_name'] if (hr and show_name) else None, value=action, colorize=colorize ) output += generate_output( line=str(int(line)+1), short=HR_RDAP_COMMON[key]['timestamp'][ '_short'] if hr else 'timestamp', name=HR_RDAP_COMMON[key]['timestamp'][ '_name'] if (hr and show_name) else None, value=timestamp, colorize=colorize ) output += generate_output( line=str(int(line)+1), short=HR_RDAP_COMMON[key]['actor'][ '_short'] if hr else 'actor', name=HR_RDAP_COMMON[key]['actor'][ '_name'] if (hr and show_name) else None, value=actor, colorize=colorize ) count += 1 return output
['def', 'generate_output_events', '(', 'self', ',', 'source', ',', 'key', ',', 'val', ',', 'line', '=', "'2'", ',', 'hr', '=', 'True', ',', 'show_name', '=', 'False', ',', 'colorize', '=', 'True', ')', ':', 'output', '=', 'generate_output', '(', 'line', '=', 'line', ',', 'short', '=', 'HR_RDAP', '[', 'source', ']', '[', 'key', ']', '[', "'_short'", ']', 'if', 'hr', 'else', 'key', ',', 'name', '=', 'HR_RDAP', '[', 'source', ']', '[', 'key', ']', '[', "'_name'", ']', 'if', '(', 'hr', 'and', 'show_name', ')', 'else', 'None', ',', 'is_parent', '=', 'False', 'if', '(', 'val', 'is', 'None', 'or', 'len', '(', 'val', ')', '==', '0', ')', 'else', 'True', ',', 'value', '=', "'None'", 'if', '(', 'val', 'is', 'None', 'or', 'len', '(', 'val', ')', '==', '0', ')', 'else', 'None', ',', 'colorize', '=', 'colorize', ')', 'if', 'val', 'is', 'not', 'None', ':', 'count', '=', '0', 'for', 'item', 'in', 'val', ':', 'try', ':', 'action', '=', 'item', '[', "'action'", ']', 'except', 'KeyError', ':', 'action', '=', 'None', 'try', ':', 'timestamp', '=', 'item', '[', "'timestamp'", ']', 'except', 'KeyError', ':', 'timestamp', '=', 'None', 'try', ':', 'actor', '=', 'item', '[', "'actor'", ']', 'except', 'KeyError', ':', 'actor', '=', 'None', 'if', 'count', '>', '0', ':', 'output', '+=', 'generate_output', '(', 'line', '=', 'str', '(', 'int', '(', 'line', ')', '+', '1', ')', ',', 'is_parent', '=', 'True', ',', 'colorize', '=', 'colorize', ')', 'output', '+=', 'generate_output', '(', 'line', '=', 'str', '(', 'int', '(', 'line', ')', '+', '1', ')', ',', 'short', '=', 'HR_RDAP_COMMON', '[', 'key', ']', '[', "'action'", ']', '[', "'_short'", ']', 'if', 'hr', 'else', "'action'", ',', 'name', '=', 'HR_RDAP_COMMON', '[', 'key', ']', '[', "'action'", ']', '[', "'_name'", ']', 'if', '(', 'hr', 'and', 'show_name', ')', 'else', 'None', ',', 'value', '=', 'action', ',', 'colorize', '=', 'colorize', ')', 'output', '+=', 'generate_output', '(', 'line', '=', 'str', '(', 'int', '(', 'line', ')', '+', '1', ')', ',', 'short', '=', 'HR_RDAP_COMMON', '[', 'key', ']', '[', "'timestamp'", ']', '[', "'_short'", ']', 'if', 'hr', 'else', "'timestamp'", ',', 'name', '=', 'HR_RDAP_COMMON', '[', 'key', ']', '[', "'timestamp'", ']', '[', "'_name'", ']', 'if', '(', 'hr', 'and', 'show_name', ')', 'else', 'None', ',', 'value', '=', 'timestamp', ',', 'colorize', '=', 'colorize', ')', 'output', '+=', 'generate_output', '(', 'line', '=', 'str', '(', 'int', '(', 'line', ')', '+', '1', ')', ',', 'short', '=', 'HR_RDAP_COMMON', '[', 'key', ']', '[', "'actor'", ']', '[', "'_short'", ']', 'if', 'hr', 'else', "'actor'", ',', 'name', '=', 'HR_RDAP_COMMON', '[', 'key', ']', '[', "'actor'", ']', '[', "'_name'", ']', 'if', '(', 'hr', 'and', 'show_name', ')', 'else', 'None', ',', 'value', '=', 'actor', ',', 'colorize', '=', 'colorize', ')', 'count', '+=', '1', 'return', 'output']
The function for generating CLI output RDAP events results. Args: source (:obj:`str`): The parent key 'network' or 'objects' (required). key (:obj:`str`): The event key 'events' or 'events_actor' (required). val (:obj:`dict`): The event dictionary (required). line (:obj:`str`): The line number (0-4). Determines indentation. Defaults to '0'. hr (:obj:`bool`): Enable human readable key translations. Defaults to True. show_name (:obj:`bool`): Show human readable name (default is to only show short). Defaults to False. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output.
['The', 'function', 'for', 'generating', 'CLI', 'output', 'RDAP', 'events', 'results', '.']
train
https://github.com/secynic/ipwhois/blob/b5d634d36b0b942d538d38d77b3bdcd815f155a0/ipwhois/scripts/ipwhois_cli.py#L534-L628
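The try/except KeyError blocks above exist because any of the three keys may be absent from an RDAP event. This is the expected shape of val, with invented values:

events = [
    {'action': 'registration', 'timestamp': '2001-05-30T00:00:00Z', 'actor': None},
    {'action': 'last changed'},   # missing keys fall back to None in the loop
]
for item in events:
    print(item.get('action'), item.get('timestamp'), item.get('actor'))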
729
openregister/openregister-python
openregister/client.py
Client.config
def config(self, name, suffix): "Return config variable value, defaulting to environment" var = '%s_%s' % (name, suffix) var = var.upper().replace('-', '_') if var in self._config: return self._config[var] return os.environ[var]
python
def config(self, name, suffix): "Return config variable value, defaulting to environment" var = '%s_%s' % (name, suffix) var = var.upper().replace('-', '_') if var in self._config: return self._config[var] return os.environ[var]
['def', 'config', '(', 'self', ',', 'name', ',', 'suffix', ')', ':', 'var', '=', "'%s_%s'", '%', '(', 'name', ',', 'suffix', ')', 'var', '=', 'var', '.', 'upper', '(', ')', '.', 'replace', '(', "'-'", ',', "'_'", ')', 'if', 'var', 'in', 'self', '.', '_config', ':', 'return', 'self', '.', '_config', '[', 'var', ']', 'return', 'os', '.', 'environ', '[', 'var', ']']
Return config variable value, defaulting to environment
['Return', 'config', 'variable', 'value', 'defaulting', 'to', 'environment']
train
https://github.com/openregister/openregister-python/blob/cdb3ed9b454ff42cffdff4f25f7dbf8c22c517e4/openregister/client.py#L16-L22
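The normalisation rule in config — join name and suffix, upper-case, dashes to underscores — in a runnable form; unlike the method, this sketch returns None instead of raising KeyError when neither source defines the variable:

import os

def lookup(config, name, suffix):
    var = ('%s_%s' % (name, suffix)).upper().replace('-', '_')
    return config.get(var, os.environ.get(var))

os.environ['FOOD_PREMISES_URL'] = 'https://example.com'   # invented variable
print(lookup({}, 'food-premises', 'url'))                 # -> https://example.com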
730
kensho-technologies/graphql-compiler
graphql_compiler/schema.py
_parse_datetime_value
def _parse_datetime_value(value): """Deserialize a DateTime object from its proper ISO-8601 representation.""" if value.endswith('Z'): # Arrow doesn't support the "Z" literal to denote UTC time. # Strip the "Z" and add an explicit time zone instead. value = value[:-1] + '+00:00' return arrow.get(value, 'YYYY-MM-DDTHH:mm:ssZ').datetime
python
def _parse_datetime_value(value): """Deserialize a DateTime object from its proper ISO-8601 representation.""" if value.endswith('Z'): # Arrow doesn't support the "Z" literal to denote UTC time. # Strip the "Z" and add an explicit time zone instead. value = value[:-1] + '+00:00' return arrow.get(value, 'YYYY-MM-DDTHH:mm:ssZ').datetime
['def', '_parse_datetime_value', '(', 'value', ')', ':', 'if', 'value', '.', 'endswith', '(', "'Z'", ')', ':', '# Arrow doesn\'t support the "Z" literal to denote UTC time.', '# Strip the "Z" and add an explicit time zone instead.', 'value', '=', 'value', '[', ':', '-', '1', ']', '+', "'+00:00'", 'return', 'arrow', '.', 'get', '(', 'value', ',', "'YYYY-MM-DDTHH:mm:ssZ'", ')', '.', 'datetime']
Deserialize a DateTime object from its proper ISO-8601 representation.
['Deserialize', 'a', 'DateTime', 'object', 'from', 'its', 'proper', 'ISO', '-', '8601', 'representation', '.']
train
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema.py#L222-L229
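The same 'Z'-suffix workaround with only the standard library, for readers without arrow installed (Python 3.7+, where %z accepts a colon in the offset):

from datetime import datetime

def parse_iso_utc(value):
    if value.endswith('Z'):                    # same rewrite as above
        value = value[:-1] + '+00:00'
    return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S%z')

print(parse_iso_utc('2017-03-22T21:54:07Z'))   # 2017-03-22 21:54:07+00:00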
731
jazzband/inflect
inflect.py
engine.plural_verb
def plural_verb(self, text, count=None): """ Return the plural of text, where text is a verb. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. """ pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess( word, self._pl_special_verb(word, count) or self._pl_general_verb(word, count), ) return "{}{}{}".format(pre, plural, post)
python
def plural_verb(self, text, count=None): """ Return the plural of text, where text is a verb. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that otherwise return the plural. Whitespace at the start and end is preserved. """ pre, word, post = self.partition_word(text) if not word: return text plural = self.postprocess( word, self._pl_special_verb(word, count) or self._pl_general_verb(word, count), ) return "{}{}{}".format(pre, plural, post)
['def', 'plural_verb', '(', 'self', ',', 'text', ',', 'count', '=', 'None', ')', ':', 'pre', ',', 'word', ',', 'post', '=', 'self', '.', 'partition_word', '(', 'text', ')', 'if', 'not', 'word', ':', 'return', 'text', 'plural', '=', 'self', '.', 'postprocess', '(', 'word', ',', 'self', '.', '_pl_special_verb', '(', 'word', ',', 'count', ')', 'or', 'self', '.', '_pl_general_verb', '(', 'word', ',', 'count', ')', ',', ')', 'return', '"{}{}{}"', '.', 'format', '(', 'pre', ',', 'plural', ',', 'post', ')']
Return the plural of text, where text is a verb. If count supplied, then return text if count is one of: 1, a, an, one, each, every, this, that; otherwise return the plural. Whitespace at the start and end is preserved.
['Return', 'the', 'plural', 'of', 'text', 'where', 'text', 'is', 'a', 'verb', '.']
train
https://github.com/jazzband/inflect/blob/c2a3df74725990c195a5d7f37199de56873962e9/inflect.py#L2265-L2283
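Usage against the public inflect API; the expected outputs in the comments follow the docstring's contract (a count of one keeps the singular, surrounding whitespace survives):

import inflect

p = inflect.engine()
print(p.plural_verb('was'))        # -> were
print(p.plural_verb('was', 1))     # count is one, singular kept: was
print(p.plural_verb('  runs  '))   # -> '  run  ' (whitespace preserved)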
732
gumblex/zhconv
zhconv/zhconv.py
convtable2dict
def convtable2dict(convtable, locale, update=None): """ Convert a list of conversion dict to a dict for a certain locale. >>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items()) [('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')] """ rdict = update.copy() if update else {} for r in convtable: if ':uni' in r: if locale in r: rdict[r[':uni']] = r[locale] elif locale[:-1] == 'zh-han': if locale in r: for word in r.values(): rdict[word] = r[locale] else: v = fallback(locale, r) for word in r.values(): rdict[word] = v return rdict
python
def convtable2dict(convtable, locale, update=None): """ Convert a list of conversion dict to a dict for a certain locale. >>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items()) [('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')] """ rdict = update.copy() if update else {} for r in convtable: if ':uni' in r: if locale in r: rdict[r[':uni']] = r[locale] elif locale[:-1] == 'zh-han': if locale in r: for word in r.values(): rdict[word] = r[locale] else: v = fallback(locale, r) for word in r.values(): rdict[word] = v return rdict
['def', 'convtable2dict', '(', 'convtable', ',', 'locale', ',', 'update', '=', 'None', ')', ':', 'rdict', '=', 'update', '.', 'copy', '(', ')', 'if', 'update', 'else', '{', '}', 'for', 'r', 'in', 'convtable', ':', 'if', "':uni'", 'in', 'r', ':', 'if', 'locale', 'in', 'r', ':', 'rdict', '[', 'r', '[', "':uni'", ']', ']', '=', 'r', '[', 'locale', ']', 'elif', 'locale', '[', ':', '-', '1', ']', '==', "'zh-han'", ':', 'if', 'locale', 'in', 'r', ':', 'for', 'word', 'in', 'r', '.', 'values', '(', ')', ':', 'rdict', '[', 'word', ']', '=', 'r', '[', 'locale', ']', 'else', ':', 'v', '=', 'fallback', '(', 'locale', ',', 'r', ')', 'for', 'word', 'in', 'r', '.', 'values', '(', ')', ':', 'rdict', '[', 'word', ']', '=', 'v', 'return', 'rdict']
Convert a list of conversion dict to a dict for a certain locale. >>> sorted(convtable2dict([{'zh-hk': '列斯', 'zh-hans': '利兹', 'zh': '利兹', 'zh-tw': '里茲'}, {':uni': '巨集', 'zh-cn': '宏'}], 'zh-cn').items()) [('列斯', '利兹'), ('利兹', '利兹'), ('巨集', '宏'), ('里茲', '利兹')]
['Convert', 'a', 'list', 'of', 'conversion', 'dict', 'to', 'a', 'dict', 'for', 'a', 'certain', 'locale', '.']
train
https://github.com/gumblex/zhconv/blob/925c0f9494f3439bc05526e7e89bb5f0ab3d185e/zhconv/zhconv.py#L176-L196
733
acorg/dark-matter
bin/fasta-identity-table.py
collectData
def collectData(reads1, reads2, square, matchAmbiguous): """ Get pairwise matching statistics for two sets of reads. @param reads1: An C{OrderedDict} of C{str} read ids whose values are C{Read} instances. These will be the rows of the table. @param reads2: An C{OrderedDict} of C{str} read ids whose values are C{Read} instances. These will be the columns of the table. @param square: If C{True} we are making a square table of a set of sequences against themselves (in which case we show nothing on the diagonal). @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are possibly correct as actually being correct. Otherwise, we are strict and insist that only non-ambiguous nucleotides can contribute to the matching nucleotide count. """ result = defaultdict(dict) for id1, read1 in reads1.items(): for id2, read2 in reads2.items(): if id1 != id2 or not square: match = compareDNAReads( read1, read2, matchAmbiguous=matchAmbiguous)['match'] if not matchAmbiguous: assert match['ambiguousMatchCount'] == 0 result[id1][id2] = result[id2][id1] = match return result
python
def collectData(reads1, reads2, square, matchAmbiguous): """ Get pairwise matching statistics for two sets of reads. @param reads1: An C{OrderedDict} of C{str} read ids whose values are C{Read} instances. These will be the rows of the table. @param reads2: An C{OrderedDict} of C{str} read ids whose values are C{Read} instances. These will be the columns of the table. @param square: If C{True} we are making a square table of a set of sequences against themselves (in which case we show nothing on the diagonal). @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are possibly correct as actually being correct. Otherwise, we are strict and insist that only non-ambiguous nucleotides can contribute to the matching nucleotide count. """ result = defaultdict(dict) for id1, read1 in reads1.items(): for id2, read2 in reads2.items(): if id1 != id2 or not square: match = compareDNAReads( read1, read2, matchAmbiguous=matchAmbiguous)['match'] if not matchAmbiguous: assert match['ambiguousMatchCount'] == 0 result[id1][id2] = result[id2][id1] = match return result
['def', 'collectData', '(', 'reads1', ',', 'reads2', ',', 'square', ',', 'matchAmbiguous', ')', ':', 'result', '=', 'defaultdict', '(', 'dict', ')', 'for', 'id1', ',', 'read1', 'in', 'reads1', '.', 'items', '(', ')', ':', 'for', 'id2', ',', 'read2', 'in', 'reads2', '.', 'items', '(', ')', ':', 'if', 'id1', '!=', 'id2', 'or', 'not', 'square', ':', 'match', '=', 'compareDNAReads', '(', 'read1', ',', 'read2', ',', 'matchAmbiguous', '=', 'matchAmbiguous', ')', '[', "'match'", ']', 'if', 'not', 'matchAmbiguous', ':', 'assert', 'match', '[', "'ambiguousMatchCount'", ']', '==', '0', 'result', '[', 'id1', ']', '[', 'id2', ']', '=', 'result', '[', 'id2', ']', '[', 'id1', ']', '=', 'match', 'return', 'result']
Get pairwise matching statistics for two sets of reads. @param reads1: An C{OrderedDict} of C{str} read ids whose values are C{Read} instances. These will be the rows of the table. @param reads2: An C{OrderedDict} of C{str} read ids whose values are C{Read} instances. These will be the columns of the table. @param square: If C{True} we are making a square table of a set of sequences against themselves (in which case we show nothing on the diagonal). @param matchAmbiguous: If C{True}, count ambiguous nucleotides that are possibly correct as actually being correct. Otherwise, we are strict and insist that only non-ambiguous nucleotides can contribute to the matching nucleotide count.
['Get', 'pairwise', 'matching', 'statistics', 'for', 'two', 'sets', 'of', 'reads', '.']
train
https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/bin/fasta-identity-table.py#L182-L208
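The loop's structure, with compareDNAReads swapped for a toy score so the sketch runs on its own; note how each match is stored under both key orders, which is what makes the square table symmetric:

from collections import defaultdict

def pairwise(ids1, ids2, square):
    result = defaultdict(dict)
    for a in ids1:
        for b in ids2:
            if a != b or not square:
                score = len(set(a) & set(b))         # stand-in for compareDNAReads
                result[a][b] = result[b][a] = score  # symmetric storage
    return result

print(dict(pairwise(['id1', 'id2'], ['id1', 'id2'], square=True)))
# -> {'id1': {'id2': 2}, 'id2': {'id1': 2}}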
734
hydpy-dev/hydpy
hydpy/cythons/modelutils.py
PyxWriter.set_pointer0d
def set_pointer0d(subseqs): """Set_pointer function for 0-dimensional link sequences.""" print(' . set_pointer0d') lines = Lines() lines.add(1, 'cpdef inline set_pointer0d' '(self, str name, pointerutils.PDouble value):') for seq in subseqs: lines.add(2, 'if name == "%s":' % seq.name) lines.add(3, 'self.%s = value.p_value' % seq.name) return lines
python
def set_pointer0d(subseqs): """Set_pointer function for 0-dimensional link sequences.""" print(' . set_pointer0d') lines = Lines() lines.add(1, 'cpdef inline set_pointer0d' '(self, str name, pointerutils.PDouble value):') for seq in subseqs: lines.add(2, 'if name == "%s":' % seq.name) lines.add(3, 'self.%s = value.p_value' % seq.name) return lines
['def', 'set_pointer0d', '(', 'subseqs', ')', ':', 'print', '(', "' . set_pointer0d'", ')', 'lines', '=', 'Lines', '(', ')', 'lines', '.', 'add', '(', '1', ',', "'cpdef inline set_pointer0d'", "'(self, str name, pointerutils.PDouble value):'", ')', 'for', 'seq', 'in', 'subseqs', ':', 'lines', '.', 'add', '(', '2', ',', '\'if name == "%s":\'', '%', 'seq', '.', 'name', ')', 'lines', '.', 'add', '(', '3', ',', "'self.%s = value.p_value'", '%', 'seq', '.', 'name', ')', 'return', 'lines']
Set_pointer function for 0-dimensional link sequences.
['Set_pointer', 'function', 'for', '0', '-', 'dimensional', 'link', 'sequences', '.']
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/cythons/modelutils.py#L570-L579
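What the generated Cython roughly looks like for two hypothetical 0-dimensional link sequences named q and w, rebuilt here with plain strings instead of the Lines helper:

class Seq:
    def __init__(self, name):
        self.name = name

lines = ['cpdef inline set_pointer0d(self, str name, pointerutils.PDouble value):']
for seq in (Seq('q'), Seq('w')):   # invented sequence names
    lines.append('    if name == "%s":' % seq.name)
    lines.append('        self.%s = value.p_value' % seq.name)
print('\n'.join(lines))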
735
newville/wxmplot
wxmplot/stackedplotframe.py
StackedPlotFrame.onThemeColor
def onThemeColor(self, color, item): """pass theme colors to bottom panel""" bconf = self.panel_bot.conf if item == 'grid': bconf.set_gridcolor(color) elif item == 'bg': bconf.set_bgcolor(color) elif item == 'frame': bconf.set_framecolor(color) elif item == 'text': bconf.set_textcolor(color) bconf.canvas.draw()
python
def onThemeColor(self, color, item): """pass theme colors to bottom panel""" bconf = self.panel_bot.conf if item == 'grid': bconf.set_gridcolor(color) elif item == 'bg': bconf.set_bgcolor(color) elif item == 'frame': bconf.set_framecolor(color) elif item == 'text': bconf.set_textcolor(color) bconf.canvas.draw()
['def', 'onThemeColor', '(', 'self', ',', 'color', ',', 'item', ')', ':', 'bconf', '=', 'self', '.', 'panel_bot', '.', 'conf', 'if', 'item', '==', "'grid'", ':', 'bconf', '.', 'set_gridcolor', '(', 'color', ')', 'elif', 'item', '==', "'bg'", ':', 'bconf', '.', 'set_bgcolor', '(', 'color', ')', 'elif', 'item', '==', "'frame'", ':', 'bconf', '.', 'set_framecolor', '(', 'color', ')', 'elif', 'item', '==', "'text'", ':', 'bconf', '.', 'set_textcolor', '(', 'color', ')', 'bconf', '.', 'canvas', '.', 'draw', '(', ')']
pass theme colors to bottom panel
['pass', 'theme', 'colors', 'to', 'bottom', 'panel']
train
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/stackedplotframe.py#L210-L221
736
python-openxml/python-docx
docx/opc/pkgreader.py
PackageReader.from_file
def from_file(pkg_file): """ Return a |PackageReader| instance loaded with contents of *pkg_file*. """ phys_reader = PhysPkgReader(pkg_file) content_types = _ContentTypeMap.from_xml(phys_reader.content_types_xml) pkg_srels = PackageReader._srels_for(phys_reader, PACKAGE_URI) sparts = PackageReader._load_serialized_parts( phys_reader, pkg_srels, content_types ) phys_reader.close() return PackageReader(content_types, pkg_srels, sparts)
python
def from_file(pkg_file): """ Return a |PackageReader| instance loaded with contents of *pkg_file*. """ phys_reader = PhysPkgReader(pkg_file) content_types = _ContentTypeMap.from_xml(phys_reader.content_types_xml) pkg_srels = PackageReader._srels_for(phys_reader, PACKAGE_URI) sparts = PackageReader._load_serialized_parts( phys_reader, pkg_srels, content_types ) phys_reader.close() return PackageReader(content_types, pkg_srels, sparts)
['def', 'from_file', '(', 'pkg_file', ')', ':', 'phys_reader', '=', 'PhysPkgReader', '(', 'pkg_file', ')', 'content_types', '=', '_ContentTypeMap', '.', 'from_xml', '(', 'phys_reader', '.', 'content_types_xml', ')', 'pkg_srels', '=', 'PackageReader', '.', '_srels_for', '(', 'phys_reader', ',', 'PACKAGE_URI', ')', 'sparts', '=', 'PackageReader', '.', '_load_serialized_parts', '(', 'phys_reader', ',', 'pkg_srels', ',', 'content_types', ')', 'phys_reader', '.', 'close', '(', ')', 'return', 'PackageReader', '(', 'content_types', ',', 'pkg_srels', ',', 'sparts', ')']
Return a |PackageReader| instance loaded with contents of *pkg_file*.
['Return', 'a', '|PackageReader|', 'instance', 'loaded', 'with', 'contents', 'of', '*', 'pkg_file', '*', '.']
train
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/pkgreader.py#L28-L39
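A hedged usage sketch; the path is a placeholder, and the returned PackageReader carries the content types, package relationships and serialized parts assembled in the constructor call above:

from docx.opc.pkgreader import PackageReader

reader = PackageReader.from_file('report.docx')   # placeholder .docx path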
737
mgedmin/imgdiff
imgdiff.py
slow_highlight
def slow_highlight(img1, img2, opts): """Try to find similar areas between two images. Produces two masks for img1 and img2. The algorithm works by comparing every possible alignment of the images, smoothing it a bit to reduce spurious matches in areas that are perceptibly different (e.g. text), and then taking the point-wise minimum of all those difference maps. This way if you insert a few pixel rows/columns into an image, similar areas should match even if different areas need to be aligned with different shifts. As you can imagine, this brute-force approach can be pretty slow, if there are many possible alignments. The closer the images are in size, the faster this will work. If would work better if it could compare alignments that go beyond the outer boundaries of the images, in case some pixels got shifted closer to an edge. """ w1, h1 = img1.size w2, h2 = img2.size W, H = max(w1, w2), max(h1, h2) pimg1 = Image.new('RGB', (W, H), opts.bgcolor) pimg2 = Image.new('RGB', (W, H), opts.bgcolor) pimg1.paste(img1, (0, 0)) pimg2.paste(img2, (0, 0)) diff = Image.new('L', (W, H), 255) # It is not a good idea to keep one diff image; it should track the # relative positions of the two images. I think that's what explains # the fuzz I see near the edges of the different areas. xr = abs(w1 - w2) + 1 yr = abs(h1 - h2) + 1 try: p = Progress(xr * yr, timeout=opts.timeout) for x in range(xr): for y in range(yr): p.next() this = ImageChops.difference(pimg1, pimg2).convert('L') this = this.filter(ImageFilter.MaxFilter(7)) diff = ImageChops.darker(diff, this) if h1 > h2: pimg2 = ImageChops.offset(pimg2, 0, 1) else: pimg1 = ImageChops.offset(pimg1, 0, 1) if h1 > h2: pimg2 = ImageChops.offset(pimg2, 0, -yr) else: pimg1 = ImageChops.offset(pimg1, 0, -yr) if w1 > w2: pimg2 = ImageChops.offset(pimg2, 1, 0) else: pimg1 = ImageChops.offset(pimg1, 1, 0) except KeyboardInterrupt: return None, None diff = diff.filter(ImageFilter.MaxFilter(5)) diff1 = diff.crop((0, 0, w1, h1)) diff2 = diff.crop((0, 0, w2, h2)) mask1 = tweak_diff(diff1, opts.opacity) mask2 = tweak_diff(diff2, opts.opacity) return mask1, mask2
python
def slow_highlight(img1, img2, opts): """Try to find similar areas between two images. Produces two masks for img1 and img2. The algorithm works by comparing every possible alignment of the images, smoothing it a bit to reduce spurious matches in areas that are perceptibly different (e.g. text), and then taking the point-wise minimum of all those difference maps. This way if you insert a few pixel rows/columns into an image, similar areas should match even if different areas need to be aligned with different shifts. As you can imagine, this brute-force approach can be pretty slow, if there are many possible alignments. The closer the images are in size, the faster this will work. If would work better if it could compare alignments that go beyond the outer boundaries of the images, in case some pixels got shifted closer to an edge. """ w1, h1 = img1.size w2, h2 = img2.size W, H = max(w1, w2), max(h1, h2) pimg1 = Image.new('RGB', (W, H), opts.bgcolor) pimg2 = Image.new('RGB', (W, H), opts.bgcolor) pimg1.paste(img1, (0, 0)) pimg2.paste(img2, (0, 0)) diff = Image.new('L', (W, H), 255) # It is not a good idea to keep one diff image; it should track the # relative positions of the two images. I think that's what explains # the fuzz I see near the edges of the different areas. xr = abs(w1 - w2) + 1 yr = abs(h1 - h2) + 1 try: p = Progress(xr * yr, timeout=opts.timeout) for x in range(xr): for y in range(yr): p.next() this = ImageChops.difference(pimg1, pimg2).convert('L') this = this.filter(ImageFilter.MaxFilter(7)) diff = ImageChops.darker(diff, this) if h1 > h2: pimg2 = ImageChops.offset(pimg2, 0, 1) else: pimg1 = ImageChops.offset(pimg1, 0, 1) if h1 > h2: pimg2 = ImageChops.offset(pimg2, 0, -yr) else: pimg1 = ImageChops.offset(pimg1, 0, -yr) if w1 > w2: pimg2 = ImageChops.offset(pimg2, 1, 0) else: pimg1 = ImageChops.offset(pimg1, 1, 0) except KeyboardInterrupt: return None, None diff = diff.filter(ImageFilter.MaxFilter(5)) diff1 = diff.crop((0, 0, w1, h1)) diff2 = diff.crop((0, 0, w2, h2)) mask1 = tweak_diff(diff1, opts.opacity) mask2 = tweak_diff(diff2, opts.opacity) return mask1, mask2
['def', 'slow_highlight', '(', 'img1', ',', 'img2', ',', 'opts', ')', ':', 'w1', ',', 'h1', '=', 'img1', '.', 'size', 'w2', ',', 'h2', '=', 'img2', '.', 'size', 'W', ',', 'H', '=', 'max', '(', 'w1', ',', 'w2', ')', ',', 'max', '(', 'h1', ',', 'h2', ')', 'pimg1', '=', 'Image', '.', 'new', '(', "'RGB'", ',', '(', 'W', ',', 'H', ')', ',', 'opts', '.', 'bgcolor', ')', 'pimg2', '=', 'Image', '.', 'new', '(', "'RGB'", ',', '(', 'W', ',', 'H', ')', ',', 'opts', '.', 'bgcolor', ')', 'pimg1', '.', 'paste', '(', 'img1', ',', '(', '0', ',', '0', ')', ')', 'pimg2', '.', 'paste', '(', 'img2', ',', '(', '0', ',', '0', ')', ')', 'diff', '=', 'Image', '.', 'new', '(', "'L'", ',', '(', 'W', ',', 'H', ')', ',', '255', ')', '# It is not a good idea to keep one diff image; it should track the', "# relative positions of the two images. I think that's what explains", '# the fuzz I see near the edges of the different areas.', 'xr', '=', 'abs', '(', 'w1', '-', 'w2', ')', '+', '1', 'yr', '=', 'abs', '(', 'h1', '-', 'h2', ')', '+', '1', 'try', ':', 'p', '=', 'Progress', '(', 'xr', '*', 'yr', ',', 'timeout', '=', 'opts', '.', 'timeout', ')', 'for', 'x', 'in', 'range', '(', 'xr', ')', ':', 'for', 'y', 'in', 'range', '(', 'yr', ')', ':', 'p', '.', 'next', '(', ')', 'this', '=', 'ImageChops', '.', 'difference', '(', 'pimg1', ',', 'pimg2', ')', '.', 'convert', '(', "'L'", ')', 'this', '=', 'this', '.', 'filter', '(', 'ImageFilter', '.', 'MaxFilter', '(', '7', ')', ')', 'diff', '=', 'ImageChops', '.', 'darker', '(', 'diff', ',', 'this', ')', 'if', 'h1', '>', 'h2', ':', 'pimg2', '=', 'ImageChops', '.', 'offset', '(', 'pimg2', ',', '0', ',', '1', ')', 'else', ':', 'pimg1', '=', 'ImageChops', '.', 'offset', '(', 'pimg1', ',', '0', ',', '1', ')', 'if', 'h1', '>', 'h2', ':', 'pimg2', '=', 'ImageChops', '.', 'offset', '(', 'pimg2', ',', '0', ',', '-', 'yr', ')', 'else', ':', 'pimg1', '=', 'ImageChops', '.', 'offset', '(', 'pimg1', ',', '0', ',', '-', 'yr', ')', 'if', 'w1', '>', 'w2', ':', 'pimg2', '=', 'ImageChops', '.', 'offset', '(', 'pimg2', ',', '1', ',', '0', ')', 'else', ':', 'pimg1', '=', 'ImageChops', '.', 'offset', '(', 'pimg1', ',', '1', ',', '0', ')', 'except', 'KeyboardInterrupt', ':', 'return', 'None', ',', 'None', 'diff', '=', 'diff', '.', 'filter', '(', 'ImageFilter', '.', 'MaxFilter', '(', '5', ')', ')', 'diff1', '=', 'diff', '.', 'crop', '(', '(', '0', ',', '0', ',', 'w1', ',', 'h1', ')', ')', 'diff2', '=', 'diff', '.', 'crop', '(', '(', '0', ',', '0', ',', 'w2', ',', 'h2', ')', ')', 'mask1', '=', 'tweak_diff', '(', 'diff1', ',', 'opts', '.', 'opacity', ')', 'mask2', '=', 'tweak_diff', '(', 'diff2', ',', 'opts', '.', 'opacity', ')', 'return', 'mask1', ',', 'mask2']
Try to find similar areas between two images. Produces two masks for img1 and img2. The algorithm works by comparing every possible alignment of the images, smoothing it a bit to reduce spurious matches in areas that are perceptibly different (e.g. text), and then taking the point-wise minimum of all those difference maps. This way if you insert a few pixel rows/columns into an image, similar areas should match even if different areas need to be aligned with different shifts. As you can imagine, this brute-force approach can be pretty slow, if there are many possible alignments. The closer the images are in size, the faster this will work. It would work better if it could compare alignments that go beyond the outer boundaries of the images, in case some pixels got shifted closer to an edge.
['Try', 'to', 'find', 'similar', 'areas', 'between', 'two', 'images', '.']
train
https://github.com/mgedmin/imgdiff/blob/f80b173c6fb1f32f3e016d153b5b84a14d966e1a/imgdiff.py#L481-L552
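The heart of the algorithm in one dimension, self-contained and with invented data: compute an elementwise difference for every alignment offset, then keep the pointwise minimum, so a position counts as similar if it matches under any alignment.

a = [5, 5, 9, 9, 5, 5]
b = [5, 5, 5, 5]                       # b is a with the 9s removed

width = len(a)
diff = [255] * width                   # start fully 'different'
for off in range(width - len(b) + 1):  # every alignment of b inside a
    padded = [None] * off + b + [None] * (width - len(b) - off)
    this = [abs(x - y) if y is not None else 255 for x, y in zip(a, padded)]
    diff = [min(d, t) for d, t in zip(diff, this)]   # pointwise minimum
print(diff)                            # -> [0, 0, 4, 4, 0, 0]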
738
scott-maddox/openbandparams
src/openbandparams/alloy.py
Alloy.add_parameter
def add_parameter(self, parameter, overload=False): ''' Adds a `Parameter` object to the instance. If a `Parameter` with the same name or alias has already been added and `overload` is False (the default), a `ValueError` is thrown. If a class member or method with the same name or alias is already defined, a `ValueError` is thrown, regardless of the value of overload. ''' if not isinstance(parameter, Parameter): raise TypeError('`parameter` must be an instance of `Parameter`') if hasattr(self, parameter.name): item = getattr(self, parameter.name) if not isinstance(item, Parameter): raise ValueError('"{}" is already a class member or method.' ''.format(parameter.name)) elif not overload: raise ValueError('Parameter "{}" has already been added' ' and overload is False.' ''.format(parameter.name)) if parameter.name in self._parameters and not overload: raise ValueError('Parameter "{}" has already been added' ' and overload is False.' ''.format(parameter.name)) for alias in parameter.aliases: if alias in self._aliases and not overload: raise ValueError('Alias "{}" has already been added' ' and overload is False.' ''.format(parameter.name)) self._add_parameter(parameter)
python
def add_parameter(self, parameter, overload=False): ''' Adds a `Parameter` object to the instance. If a `Parameter` with the same name or alias has already been added and `overload` is False (the default), a `ValueError` is thrown. If a class member or method with the same name or alias is already defined, a `ValueError` is thrown, regardless of the value of overload. ''' if not isinstance(parameter, Parameter): raise TypeError('`parameter` must be an instance of `Parameter`') if hasattr(self, parameter.name): item = getattr(self, parameter.name) if not isinstance(item, Parameter): raise ValueError('"{}" is already a class member or method.' ''.format(parameter.name)) elif not overload: raise ValueError('Parameter "{}" has already been added' ' and overload is False.' ''.format(parameter.name)) if parameter.name in self._parameters and not overload: raise ValueError('Parameter "{}" has already been added' ' and overload is False.' ''.format(parameter.name)) for alias in parameter.aliases: if alias in self._aliases and not overload: raise ValueError('Alias "{}" has already been added' ' and overload is False.' ''.format(parameter.name)) self._add_parameter(parameter)
['def', 'add_parameter', '(', 'self', ',', 'parameter', ',', 'overload', '=', 'False', ')', ':', 'if', 'not', 'isinstance', '(', 'parameter', ',', 'Parameter', ')', ':', 'raise', 'TypeError', '(', "'`parameter` must be an instance of `Parameter`'", ')', 'if', 'hasattr', '(', 'self', ',', 'parameter', '.', 'name', ')', ':', 'item', '=', 'getattr', '(', 'self', ',', 'parameter', '.', 'name', ')', 'if', 'not', 'isinstance', '(', 'item', ',', 'Parameter', ')', ':', 'raise', 'ValueError', '(', '\'"{}" is already a class member or method.\'', "''", '.', 'format', '(', 'parameter', '.', 'name', ')', ')', 'elif', 'not', 'overload', ':', 'raise', 'ValueError', '(', '\'Parameter "{}" has already been added\'', "' and overload is False.'", "''", '.', 'format', '(', 'parameter', '.', 'name', ')', ')', 'if', 'parameter', '.', 'name', 'in', 'self', '.', '_parameters', 'and', 'not', 'overload', ':', 'raise', 'ValueError', '(', '\'Parameter "{}" has already been added\'', "' and overload is False.'", "''", '.', 'format', '(', 'parameter', '.', 'name', ')', ')', 'for', 'alias', 'in', 'parameter', '.', 'aliases', ':', 'if', 'alias', 'in', 'self', '.', '_aliases', 'and', 'not', 'overload', ':', 'raise', 'ValueError', '(', '\'Alias "{}" has already been added\'', "' and overload is False.'", "''", '.', 'format', '(', 'parameter', '.', 'name', ')', ')', 'self', '.', '_add_parameter', '(', 'parameter', ')']
Adds a `Parameter` object to the instance. If a `Parameter` with the same name or alias has already been added and `overload` is False (the default), a `ValueError` is thrown. If a class member or method with the same name or alias is already defined, a `ValueError` is thrown, regardless of the value of overload.
['Adds', 'a', 'Parameter', 'object', 'to', 'the', 'instance', '.', 'If', 'a', 'Parameter', 'with', 'the', 'same', 'name', 'or', 'alias', 'has', 'already', 'been', 'added', 'and', 'overload', 'is', 'False', '(', 'the', 'default', ')', 'a', 'ValueError', 'is', 'thrown', '.', 'If', 'a', 'class', 'member', 'or', 'method', 'with', 'the', 'same', 'name', 'or', 'alias', 'is', 'already', 'defined', 'a', 'ValueError', 'is', 'thrown', 'regardless', 'of', 'the', 'value', 'of', 'overload', '.']
train
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/alloy.py#L93-L124
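The first-wins-unless-overload rule, reduced to a registry of names so the behaviour can be demonstrated without constructing Parameter objects:

def add_name(registry, name, overload=False):
    if name in registry and not overload:
        raise ValueError('Parameter "%s" has already been added'
                         ' and overload is False.' % name)
    registry[name] = True

params = {}
add_name(params, 'Eg')
add_name(params, 'Eg', overload=True)   # allowed
try:
    add_name(params, 'Eg')
except ValueError as err:
    print(err)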
739
log2timeline/plaso
plaso/parsers/esedb_plugins/msie_webcache.py
MsieWebCacheESEDBPlugin.ParsePartitionsTable
def ParsePartitionsTable( self, parser_mediator, database=None, table=None, **unused_kwargs): """Parses the Partitions table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing. """ if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = MsieWebCachePartitionsEventData() event_data.directory = record_values.get('Directory', None) event_data.partition_identifier = record_values.get('PartitionId', None) event_data.partition_type = record_values.get('PartitionType', None) event_data.table_identifier = record_values.get('TableId', None) timestamp = record_values.get('LastScavengeTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Last Scavenge Time') parser_mediator.ProduceEventWithEventData(event, event_data)
python
def ParsePartitionsTable( self, parser_mediator, database=None, table=None, **unused_kwargs): """Parses the Partitions table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. database (Optional[pyesedb.file]): ESE database. table (Optional[pyesedb.table]): table. Raises: ValueError: if the database or table value is missing. """ if database is None: raise ValueError('Missing database value.') if table is None: raise ValueError('Missing table value.') for esedb_record in table.records: if parser_mediator.abort: break record_values = self._GetRecordValues( parser_mediator, table.name, esedb_record) event_data = MsieWebCachePartitionsEventData() event_data.directory = record_values.get('Directory', None) event_data.partition_identifier = record_values.get('PartitionId', None) event_data.partition_type = record_values.get('PartitionType', None) event_data.table_identifier = record_values.get('TableId', None) timestamp = record_values.get('LastScavengeTime', None) if timestamp: date_time = dfdatetime_filetime.Filetime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, 'Last Scavenge Time') parser_mediator.ProduceEventWithEventData(event, event_data)
['def', 'ParsePartitionsTable', '(', 'self', ',', 'parser_mediator', ',', 'database', '=', 'None', ',', 'table', '=', 'None', ',', '*', '*', 'unused_kwargs', ')', ':', 'if', 'database', 'is', 'None', ':', 'raise', 'ValueError', '(', "'Missing database value.'", ')', 'if', 'table', 'is', 'None', ':', 'raise', 'ValueError', '(', "'Missing table value.'", ')', 'for', 'esedb_record', 'in', 'table', '.', 'records', ':', 'if', 'parser_mediator', '.', 'abort', ':', 'break', 'record_values', '=', 'self', '.', '_GetRecordValues', '(', 'parser_mediator', ',', 'table', '.', 'name', ',', 'esedb_record', ')', 'event_data', '=', 'MsieWebCachePartitionsEventData', '(', ')', 'event_data', '.', 'directory', '=', 'record_values', '.', 'get', '(', "'Directory'", ',', 'None', ')', 'event_data', '.', 'partition_identifier', '=', 'record_values', '.', 'get', '(', "'PartitionId'", ',', 'None', ')', 'event_data', '.', 'partition_type', '=', 'record_values', '.', 'get', '(', "'PartitionType'", ',', 'None', ')', 'event_data', '.', 'table_identifier', '=', 'record_values', '.', 'get', '(', "'TableId'", ',', 'None', ')', 'timestamp', '=', 'record_values', '.', 'get', '(', "'LastScavengeTime'", ',', 'None', ')', 'if', 'timestamp', ':', 'date_time', '=', 'dfdatetime_filetime', '.', 'Filetime', '(', 'timestamp', '=', 'timestamp', ')', 'event', '=', 'time_events', '.', 'DateTimeValuesEvent', '(', 'date_time', ',', "'Last Scavenge Time'", ')', 'parser_mediator', '.', 'ProduceEventWithEventData', '(', 'event', ',', 'event_data', ')']
Parses the Partitions table.

Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    database (Optional[pyesedb.file]): ESE database.
    table (Optional[pyesedb.table]): table.

Raises:
    ValueError: if the database or table value is missing.
['Parses', 'the', 'Partitions', 'table', '.']
train
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/esedb_plugins/msie_webcache.py#L358-L395
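The LastScavengeTime value handled above is a Windows FILETIME: a count of 100-nanosecond intervals since 1601-01-01, which is why the record wraps it in dfdatetime's Filetime class instead of treating it as a POSIX timestamp. A minimal stdlib illustration of that convention, independent of plaso (the sample value is made up):

from datetime import datetime, timedelta

def filetime_to_datetime(timestamp):
    # FILETIME counts 100ns ticks since 1601-01-01 00:00 UTC.
    return datetime(1601, 1, 1) + timedelta(microseconds=timestamp // 10)

print(filetime_to_datetime(131029879846790000))  # a date in March 2016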
740
pulumi/pulumi
sdk/python/lib/pulumi/config.py
Config.require
def require(self, key: str) -> str:
    """
    Returns a configuration value by its given key.  If it doesn't exist, an error is thrown.

    :param str key: The requested configuration key.
    :return: The configuration key's value.
    :rtype: str
    :raises ConfigMissingError: The configuration value did not exist.
    """
    v = self.get(key)
    if v is None:
        raise ConfigMissingError(self.full_key(key))
    return v
python
def require(self, key: str) -> str:
    """
    Returns a configuration value by its given key.  If it doesn't exist, an error is thrown.

    :param str key: The requested configuration key.
    :return: The configuration key's value.
    :rtype: str
    :raises ConfigMissingError: The configuration value did not exist.
    """
    v = self.get(key)
    if v is None:
        raise ConfigMissingError(self.full_key(key))
    return v
['def', 'require', '(', 'self', ',', 'key', ':', 'str', ')', '->', 'str', ':', 'v', '=', 'self', '.', 'get', '(', 'key', ')', 'if', 'v', 'is', 'None', ':', 'raise', 'ConfigMissingError', '(', 'self', '.', 'full_key', '(', 'key', ')', ')', 'return', 'v']
Returns a configuration value by its given key.  If it doesn't exist, an error is thrown.

:param str key: The requested configuration key.
:return: The configuration key's value.
:rtype: str
:raises ConfigMissingError: The configuration value did not exist.
['Returns', 'a', 'configuration', 'value', 'by', 'its', 'given', 'key', '.', 'If', 'it', 'doesn', 't', 'exist', 'an', 'error', 'is', 'thrown', '.']
train
https://github.com/pulumi/pulumi/blob/95d51efe6ab9a533838b6d83aa240b5f912e72aa/sdk/python/lib/pulumi/config.py#L115-L127
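A hedged usage sketch of require() versus get(). It assumes it runs inside a Pulumi program whose values were set with `pulumi config set`; the key names are placeholders:

import pulumi

config = pulumi.Config()
region = config.require('region')  # raises ConfigMissingError when unset
debug = config.get('debug')        # plain get() returns None instead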
741
sethmlarson/virtualbox-python
virtualbox/library.py
IMachineDebugger.unload_plug_in
def unload_plug_in(self, name):
    """Unloads a DBGF plug-in.

    in name of type str
        The plug-in name or DLL. Special name 'all' unloads all plug-ins.

    """
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    self._call("unloadPlugIn",
               in_p=[name])
python
def unload_plug_in(self, name):
    """Unloads a DBGF plug-in.

    in name of type str
        The plug-in name or DLL. Special name 'all' unloads all plug-ins.

    """
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    self._call("unloadPlugIn",
               in_p=[name])
['def', 'unload_plug_in', '(', 'self', ',', 'name', ')', ':', 'if', 'not', 'isinstance', '(', 'name', ',', 'basestring', ')', ':', 'raise', 'TypeError', '(', '"name can only be an instance of type basestring"', ')', 'self', '.', '_call', '(', '"unloadPlugIn"', ',', 'in_p', '=', '[', 'name', ']', ')']
Unloads a DBGF plug-in.

in name of type str
    The plug-in name or DLL. Special name 'all' unloads all plug-ins.
['Unloads', 'a', 'DBGF', 'plug', '-', 'in', '.']
train
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L26606-L26616
742
jendrikseipp/vulture
vulture/lines.py
get_last_line_number
def get_last_line_number(node):
    """Estimate last line number of the given AST node.

    The estimate is based on the line number of the last descendant of
    `node` that has a lineno attribute. Therefore, it underestimates the
    size of code ending with, e.g., multiline strings and comments.

    When traversing the tree, we may see a mix of nodes with line numbers
    and nodes without line numbers. We therefore store the maximum line
    number seen so far and report it at the end. A more accurate (but also
    slower to compute) estimate would traverse all children, instead of
    just the last one, since choosing the last one may lead to a path that
    ends with a node without a line number.
    """
    max_lineno = node.lineno
    while True:
        last_child = _get_last_child_with_lineno(node)
        if last_child is None:
            return max_lineno
        else:
            try:
                max_lineno = max(max_lineno, last_child.lineno)
            except AttributeError:
                pass
        node = last_child
python
def get_last_line_number(node):
    """Estimate last line number of the given AST node.

    The estimate is based on the line number of the last descendant of
    `node` that has a lineno attribute. Therefore, it underestimates the
    size of code ending with, e.g., multiline strings and comments.

    When traversing the tree, we may see a mix of nodes with line numbers
    and nodes without line numbers. We therefore store the maximum line
    number seen so far and report it at the end. A more accurate (but also
    slower to compute) estimate would traverse all children, instead of
    just the last one, since choosing the last one may lead to a path that
    ends with a node without a line number.
    """
    max_lineno = node.lineno
    while True:
        last_child = _get_last_child_with_lineno(node)
        if last_child is None:
            return max_lineno
        else:
            try:
                max_lineno = max(max_lineno, last_child.lineno)
            except AttributeError:
                pass
        node = last_child
['def', 'get_last_line_number', '(', 'node', ')', ':', 'max_lineno', '=', 'node', '.', 'lineno', 'while', 'True', ':', 'last_child', '=', '_get_last_child_with_lineno', '(', 'node', ')', 'if', 'last_child', 'is', 'None', ':', 'return', 'max_lineno', 'else', ':', 'try', ':', 'max_lineno', '=', 'max', '(', 'max_lineno', ',', 'last_child', '.', 'lineno', ')', 'except', 'AttributeError', ':', 'pass', 'node', '=', 'last_child']
Estimate last line number of the given AST node.

The estimate is based on the line number of the last descendant of
`node` that has a lineno attribute. Therefore, it underestimates the
size of code ending with, e.g., multiline strings and comments.

When traversing the tree, we may see a mix of nodes with line numbers
and nodes without line numbers. We therefore store the maximum line
number seen so far and report it at the end. A more accurate (but also
slower to compute) estimate would traverse all children, instead of
just the last one, since choosing the last one may lead to a path that
ends with a node without a line number.
['Estimate', 'last', 'line', 'number', 'of', 'the', 'given', 'AST', 'node', '.']
train
https://github.com/jendrikseipp/vulture/blob/fed11fb7e7ed065058a9fb1acd10052ece37f984/vulture/lines.py#L40-L65
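To see the estimate in action, a small sketch. It assumes vulture is installed so the helper is importable from the module this record points at; the sample source is made up:

import ast
from vulture.lines import get_last_line_number  # assumes vulture is installed

source = "def greet():\n    x = 1\n    return x\n"
func = ast.parse(source).body[0]
# The function starts on line 1; its last descendant with a lineno is on line 3.
print(func.lineno, get_last_line_number(func))  # 1 3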
743
RIPE-NCC/ripe-atlas-cousteau
ripe/atlas/cousteau/source.py
AtlasChangeSource.clean
def clean(self):
    """
    Cleans/checks user has entered all required attributes. This might save
    some queries from being sent to server if they are totally wrong.
    """
    if not all([self._type, self._requested, self._value, self._action]):
        raise MalFormattedSource(
            "<type, requested, value, action> fields are required."
        )
python
def clean(self):
    """
    Cleans/checks user has entered all required attributes. This might save
    some queries from being sent to server if they are totally wrong.
    """
    if not all([self._type, self._requested, self._value, self._action]):
        raise MalFormattedSource(
            "<type, requested, value, action> fields are required."
        )
['def', 'clean', '(', 'self', ')', ':', 'if', 'not', 'all', '(', '[', 'self', '.', '_type', ',', 'self', '.', '_requested', ',', 'self', '.', '_value', ',', 'self', '.', '_action', ']', ')', ':', 'raise', 'MalFormattedSource', '(', '"<type, requested, value, action> fields are required."', ')']
Cleans/checks user has entered all required attributes. This might save some queries from being sent to server if they are totally wrong.
['Cleans', '/', 'checks', 'user', 'has', 'entered', 'all', 'required', 'attributes', '.', 'This', 'might', 'save', 'some', 'queries', 'from', 'being', 'sent', 'to', 'server', 'if', 'they', 'are', 'totally', 'wrong', '.']
train
https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/source.py#L216-L224
744
AndresMWeber/Nomenclate
nomenclate/core/nameparser.py
NameParser._generic_search
def _generic_search(cls, name, search_string, metadata={}, ignore=''):
    """ Searches for a specific string using three types of regex searches.
    Also auto-checks for camel casing.

    :param name: str, name of object in question
    :param search_string: str, string to find and insert into the search regexes
    :param metadata: dict, metadata to add to the result if we find a match
    :param ignore: str, ignore specific string for the search
    :return: dict, dictionary of search results
    """
    patterns = [cls.REGEX_ABBR_SEOS,
                cls.REGEX_ABBR_ISLAND,
                cls.REGEX_ABBR_CAMEL]
    if not search_string[0].isupper():
        patterns.remove(cls.REGEX_ABBR_CAMEL)
    for pattern in patterns:
        search_result = cls._get_regex_search(
            name,
            pattern.format(ABBR=search_string, SEP=cls.REGEX_SEPARATORS),
            metadata=metadata,
            match_index=0,
            ignore=ignore)
        if search_result is not None:
            if cls.is_valid_camel(search_result.get('match_full'),
                                  strcmp=search_result.get('match')):
                return search_result
    return None
python
def _generic_search(cls, name, search_string, metadata={}, ignore=''):
    """ Searches for a specific string using three types of regex searches.
    Also auto-checks for camel casing.

    :param name: str, name of object in question
    :param search_string: str, string to find and insert into the search regexes
    :param metadata: dict, metadata to add to the result if we find a match
    :param ignore: str, ignore specific string for the search
    :return: dict, dictionary of search results
    """
    patterns = [cls.REGEX_ABBR_SEOS,
                cls.REGEX_ABBR_ISLAND,
                cls.REGEX_ABBR_CAMEL]
    if not search_string[0].isupper():
        patterns.remove(cls.REGEX_ABBR_CAMEL)
    for pattern in patterns:
        search_result = cls._get_regex_search(
            name,
            pattern.format(ABBR=search_string, SEP=cls.REGEX_SEPARATORS),
            metadata=metadata,
            match_index=0,
            ignore=ignore)
        if search_result is not None:
            if cls.is_valid_camel(search_result.get('match_full'),
                                  strcmp=search_result.get('match')):
                return search_result
    return None
['def', '_generic_search', '(', 'cls', ',', 'name', ',', 'search_string', ',', 'metadata', '=', '{', '}', ',', 'ignore', '=', "''", ')', ':', 'patterns', '=', '[', 'cls', '.', 'REGEX_ABBR_SEOS', ',', 'cls', '.', 'REGEX_ABBR_ISLAND', ',', 'cls', '.', 'REGEX_ABBR_CAMEL', ']', 'if', 'not', 'search_string', '[', '0', ']', '.', 'isupper', '(', ')', ':', 'patterns', '.', 'remove', '(', 'cls', '.', 'REGEX_ABBR_CAMEL', ')', 'for', 'pattern', 'in', 'patterns', ':', 'search_result', '=', 'cls', '.', '_get_regex_search', '(', 'name', ',', 'pattern', '.', 'format', '(', 'ABBR', '=', 'search_string', ',', 'SEP', '=', 'cls', '.', 'REGEX_SEPARATORS', ')', ',', 'metadata', '=', 'metadata', ',', 'match_index', '=', '0', ',', 'ignore', '=', 'ignore', ')', 'if', 'search_result', 'is', 'not', 'None', ':', 'if', 'cls', '.', 'is_valid_camel', '(', 'search_result', '.', 'get', '(', "'match_full'", ')', ',', 'strcmp', '=', 'search_result', '.', 'get', '(', "'match'", ')', ')', ':', 'return', 'search_result', 'return', 'None']
Searches for a specific string using three types of regex searches.
Also auto-checks for camel casing.

:param name: str, name of object in question
:param search_string: str, string to find and insert into the search regexes
:param metadata: dict, metadata to add to the result if we find a match
:param ignore: str, ignore specific string for the search
:return: dict, dictionary of search results
['Searches', 'for', 'a', 'specific', 'string', 'given', 'three', 'types', 'of', 'regex', 'search', 'types', '.', 'Also', 'auto', '-', 'checks', 'for', 'camel', 'casing', '.']
train
https://github.com/AndresMWeber/Nomenclate/blob/e6d6fc28beac042bad588e56fbe77531d2de6b6f/nomenclate/core/nameparser.py#L344-L369
745
jxtech/wechatpy
wechatpy/client/api/shakearound.py
WeChatShakeAround.update_device
def update_device(self, device_id=None, uuid=None, major=None,
                  minor=None, comment=None):
    """
    Update device information
    Details: http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html

    :param device_id: Device ID; optional if UUID, major and minor are
                      given. If both are provided, device_id takes precedence.
    :param uuid: UUID
    :param major: major
    :param minor: minor
    :param comment: Remarks for the device; at most 15 Chinese characters
                    or 30 Latin letters.
    :return: Returned JSON data
    """
    data = optionaldict()
    data['comment'] = comment
    data['device_identifier'] = {
        'device_id': device_id,
        'uuid': uuid,
        'major': major,
        'minor': minor
    }
    return self._post(
        'shakearound/device/update',
        data=data
    )
python
def update_device(self, device_id=None, uuid=None, major=None,
                  minor=None, comment=None):
    """
    Update device information
    Details: http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html

    :param device_id: Device ID; optional if UUID, major and minor are
                      given. If both are provided, device_id takes precedence.
    :param uuid: UUID
    :param major: major
    :param minor: minor
    :param comment: Remarks for the device; at most 15 Chinese characters
                    or 30 Latin letters.
    :return: Returned JSON data
    """
    data = optionaldict()
    data['comment'] = comment
    data['device_identifier'] = {
        'device_id': device_id,
        'uuid': uuid,
        'major': major,
        'minor': minor
    }
    return self._post(
        'shakearound/device/update',
        data=data
    )
['def', 'update_device', '(', 'self', ',', 'device_id', '=', 'None', ',', 'uuid', '=', 'None', ',', 'major', '=', 'None', ',', 'minor', '=', 'None', ',', 'comment', '=', 'None', ')', ':', 'data', '=', 'optionaldict', '(', ')', 'data', '[', "'comment'", ']', '=', 'comment', 'data', '[', "'device_identifier'", ']', '=', '{', "'device_id'", ':', 'device_id', ',', "'uuid'", ':', 'uuid', ',', "'major'", ':', 'major', ',', "'minor'", ':', 'minor', '}', 'return', 'self', '.', '_post', '(', "'shakearound/device/update'", ',', 'data', '=', 'data', ')']
Update device information
Details: http://mp.weixin.qq.com/wiki/15/b9e012f917e3484b7ed02771156411f3.html

:param device_id: Device ID; optional if UUID, major and minor are
                  given. If both are provided, device_id takes precedence.
:param uuid: UUID
:param major: major
:param minor: minor
:param comment: Remarks for the device; at most 15 Chinese characters
                or 30 Latin letters.
:return: Returned JSON data
['Update', 'device', 'information', 'Details', ':', 'http', ':', '//', 'mp', '.', 'weixin', '.', 'qq', '.', 'com', '/', 'wiki', '/', '15', '/', 'b9e012f917e3484b7ed02771156411f3', '.', 'html']
train
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/shakearound.py#L49-L74
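A hedged usage sketch of the method above. It assumes wechatpy is installed and exposes the ShakeAround API on the client; credentials and device numbers are placeholders:

from wechatpy import WeChatClient  # assumes wechatpy is installed

client = WeChatClient('appid', 'secret')  # placeholder credentials
# Select the device either by its ID...
client.shakearound.update_device(device_id=10011, comment='Lobby beacon')
# ...or by the UUID/major/minor triple.
client.shakearound.update_device(
    uuid='FDA50693-A4E2-4FB1-AFCF-C6EB07647825', major=10, minor=1,
    comment='Lobby beacon')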
746
seequent/properties
properties/math.py
Array.to_json
def to_json(value, **kwargs):
    """Convert array to JSON list

    nan values are converted to string 'nan', inf values to 'inf'.
    """
    def _recurse_list(val):
        if val and isinstance(val[0], list):
            return [_recurse_list(v) for v in val]
        return [str(v) if np.isnan(v) or np.isinf(v) else v for v in val]
    return _recurse_list(value.tolist())
python
def to_json(value, **kwargs):
    """Convert array to JSON list

    nan values are converted to string 'nan', inf values to 'inf'.
    """
    def _recurse_list(val):
        if val and isinstance(val[0], list):
            return [_recurse_list(v) for v in val]
        return [str(v) if np.isnan(v) or np.isinf(v) else v for v in val]
    return _recurse_list(value.tolist())
['def', 'to_json', '(', 'value', ',', '*', '*', 'kwargs', ')', ':', 'def', '_recurse_list', '(', 'val', ')', ':', 'if', 'val', 'and', 'isinstance', '(', 'val', '[', '0', ']', ',', 'list', ')', ':', 'return', '[', '_recurse_list', '(', 'v', ')', 'for', 'v', 'in', 'val', ']', 'return', '[', 'str', '(', 'v', ')', 'if', 'np', '.', 'isnan', '(', 'v', ')', 'or', 'np', '.', 'isinf', '(', 'v', ')', 'else', 'v', 'for', 'v', 'in', 'val', ']', 'return', '_recurse_list', '(', 'value', '.', 'tolist', '(', ')', ')']
Convert array to JSON list

nan values are converted to string 'nan', inf values to 'inf'.
['Convert', 'array', 'to', 'JSON', 'list']
train
https://github.com/seequent/properties/blob/096b07012fff86b0a880c8c018320c3b512751b9/properties/math.py#L227-L236
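The nan/inf substitution matters because json.dumps has no standard encoding for those values. A standalone, runnable restatement of the inner helper (numpy required; the sample array is made up):

import numpy as np

def _recurse_list(val):
    # Recurse into nested lists; replace non-finite floats with strings.
    if val and isinstance(val[0], list):
        return [_recurse_list(v) for v in val]
    return [str(v) if np.isnan(v) or np.isinf(v) else v for v in val]

value = np.array([[1.0, float('nan')], [float('inf'), 4.0]])
print(_recurse_list(value.tolist()))  # [[1.0, 'nan'], ['inf', 4.0]]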
747
slimkrazy/python-google-places
googleplaces/__init__.py
GooglePlaces.add_place
def add_place(self, **kwargs):
    """Adds a place to the Google Places database.

    On a successful request, this method will return a dict containing
    the new Place's place_id and id in keys 'place_id' and 'id'
    respectively.

    keyword arguments:
    name     -- The full text name of the Place. Limited to 255 characters.
    lat_lng  -- A dict containing the following keys: lat, lng.
    accuracy -- The accuracy of the location signal on which this request
                is based, expressed in meters.
    types    -- The category in which this Place belongs. Only one type
                can currently be specified for a Place. A string or single
                element list may be passed in.
    language -- The language in which the Place's name is being reported.
                (defaults 'en').
    sensor   -- Boolean flag denoting if the location came from a device
                using its location sensor (default False).
    """
    required_kwargs = {'name': [str], 'lat_lng': [dict],
                       'accuracy': [int], 'types': [str, list]}
    request_params = {}
    for key in required_kwargs:
        if key not in kwargs or kwargs[key] is None:
            raise ValueError('The %s argument is required.' % key)
        expected_types = required_kwargs[key]
        type_is_valid = False
        for expected_type in expected_types:
            if isinstance(kwargs[key], expected_type):
                type_is_valid = True
                break
        if not type_is_valid:
            raise ValueError('Invalid value for %s' % key)
        if key != 'lat_lng':
            request_params[key] = kwargs[key]
    if len(kwargs['name']) > 255:
        raise ValueError('The place name must not exceed 255 characters ' +
                         'in length.')
    try:
        kwargs['lat_lng']['lat']
        kwargs['lat_lng']['lng']
        request_params['location'] = kwargs['lat_lng']
    except KeyError:
        raise ValueError('Invalid keys for lat_lng.')
    request_params['language'] = (kwargs.get('language')
                                  if kwargs.get('language') is not None
                                  else lang.ENGLISH)
    sensor = (kwargs.get('sensor')
              if kwargs.get('sensor') is not None
              else False)
    # At some point Google might support multiple types, so this supports
    # strings and lists.
    if isinstance(kwargs['types'], str):
        request_params['types'] = [kwargs['types']]
    else:
        request_params['types'] = kwargs['types']
    url, add_response = _fetch_remote_json(
        GooglePlaces.ADD_API_URL % (str(sensor).lower(), self.api_key),
        json.dumps(request_params), use_http_post=True)
    _validate_response(url, add_response)
    return {'place_id': add_response['place_id'],
            'id': add_response['id']}
python
def add_place(self, **kwargs):
    """Adds a place to the Google Places database.

    On a successful request, this method will return a dict containing
    the new Place's place_id and id in keys 'place_id' and 'id'
    respectively.

    keyword arguments:
    name     -- The full text name of the Place. Limited to 255 characters.
    lat_lng  -- A dict containing the following keys: lat, lng.
    accuracy -- The accuracy of the location signal on which this request
                is based, expressed in meters.
    types    -- The category in which this Place belongs. Only one type
                can currently be specified for a Place. A string or single
                element list may be passed in.
    language -- The language in which the Place's name is being reported.
                (defaults 'en').
    sensor   -- Boolean flag denoting if the location came from a device
                using its location sensor (default False).
    """
    required_kwargs = {'name': [str], 'lat_lng': [dict],
                       'accuracy': [int], 'types': [str, list]}
    request_params = {}
    for key in required_kwargs:
        if key not in kwargs or kwargs[key] is None:
            raise ValueError('The %s argument is required.' % key)
        expected_types = required_kwargs[key]
        type_is_valid = False
        for expected_type in expected_types:
            if isinstance(kwargs[key], expected_type):
                type_is_valid = True
                break
        if not type_is_valid:
            raise ValueError('Invalid value for %s' % key)
        if key != 'lat_lng':
            request_params[key] = kwargs[key]
    if len(kwargs['name']) > 255:
        raise ValueError('The place name must not exceed 255 characters ' +
                         'in length.')
    try:
        kwargs['lat_lng']['lat']
        kwargs['lat_lng']['lng']
        request_params['location'] = kwargs['lat_lng']
    except KeyError:
        raise ValueError('Invalid keys for lat_lng.')
    request_params['language'] = (kwargs.get('language')
                                  if kwargs.get('language') is not None
                                  else lang.ENGLISH)
    sensor = (kwargs.get('sensor')
              if kwargs.get('sensor') is not None
              else False)
    # At some point Google might support multiple types, so this supports
    # strings and lists.
    if isinstance(kwargs['types'], str):
        request_params['types'] = [kwargs['types']]
    else:
        request_params['types'] = kwargs['types']
    url, add_response = _fetch_remote_json(
        GooglePlaces.ADD_API_URL % (str(sensor).lower(), self.api_key),
        json.dumps(request_params), use_http_post=True)
    _validate_response(url, add_response)
    return {'place_id': add_response['place_id'],
            'id': add_response['id']}
['def', 'add_place', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'required_kwargs', '=', '{', "'name'", ':', '[', 'str', ']', ',', "'lat_lng'", ':', '[', 'dict', ']', ',', "'accuracy'", ':', '[', 'int', ']', ',', "'types'", ':', '[', 'str', ',', 'list', ']', '}', 'request_params', '=', '{', '}', 'for', 'key', 'in', 'required_kwargs', ':', 'if', 'key', 'not', 'in', 'kwargs', 'or', 'kwargs', '[', 'key', ']', 'is', 'None', ':', 'raise', 'ValueError', '(', "'The %s argument is required.'", '%', 'key', ')', 'expected_types', '=', 'required_kwargs', '[', 'key', ']', 'type_is_valid', '=', 'False', 'for', 'expected_type', 'in', 'expected_types', ':', 'if', 'isinstance', '(', 'kwargs', '[', 'key', ']', ',', 'expected_type', ')', ':', 'type_is_valid', '=', 'True', 'break', 'if', 'not', 'type_is_valid', ':', 'raise', 'ValueError', '(', "'Invalid value for %s'", '%', 'key', ')', 'if', 'key', 'is', 'not', "'lat_lng'", ':', 'request_params', '[', 'key', ']', '=', 'kwargs', '[', 'key', ']', 'if', 'len', '(', 'kwargs', '[', "'name'", ']', ')', '>', '255', ':', 'raise', 'ValueError', '(', "'The place name must not exceed 255 characters '", '+', "'in length.'", ')', 'try', ':', 'kwargs', '[', "'lat_lng'", ']', '[', "'lat'", ']', 'kwargs', '[', "'lat_lng'", ']', '[', "'lng'", ']', 'request_params', '[', "'location'", ']', '=', 'kwargs', '[', "'lat_lng'", ']', 'except', 'KeyError', ':', 'raise', 'ValueError', '(', "'Invalid keys for lat_lng.'", ')', 'request_params', '[', "'language'", ']', '=', '(', 'kwargs', '.', 'get', '(', "'language'", ')', 'if', 'kwargs', '.', 'get', '(', "'language'", ')', 'is', 'not', 'None', 'else', 'lang', '.', 'ENGLISH', ')', 'sensor', '=', '(', 'kwargs', '.', 'get', '(', "'sensor'", ')', 'if', 'kwargs', '.', 'get', '(', "'sensor'", ')', 'is', 'not', 'None', 'else', 'False', ')', '# At some point Google might support multiple types, so this supports', '# strings and lists.', 'if', 'isinstance', '(', 'kwargs', '[', "'types'", ']', ',', 'str', ')', ':', 'request_params', '[', "'types'", ']', '=', '[', 'kwargs', '[', "'types'", ']', ']', 'else', ':', 'request_params', '[', "'types'", ']', '=', 'kwargs', '[', "'types'", ']', 'url', ',', 'add_response', '=', '_fetch_remote_json', '(', 'GooglePlaces', '.', 'ADD_API_URL', '%', '(', 'str', '(', 'sensor', ')', '.', 'lower', '(', ')', ',', 'self', '.', 'api_key', ')', ',', 'json', '.', 'dumps', '(', 'request_params', ')', ',', 'use_http_post', '=', 'True', ')', '_validate_response', '(', 'url', ',', 'add_response', ')', 'return', '{', "'place_id'", ':', 'add_response', '[', "'place_id'", ']', ',', "'id'", ':', 'add_response', '[', "'id'", ']', '}']
Adds a place to the Google Places database.

On a successful request, this method will return a dict containing
the new Place's place_id and id in keys 'place_id' and 'id'
respectively.

keyword arguments:
name     -- The full text name of the Place. Limited to 255 characters.
lat_lng  -- A dict containing the following keys: lat, lng.
accuracy -- The accuracy of the location signal on which this request
            is based, expressed in meters.
types    -- The category in which this Place belongs. Only one type
            can currently be specified for a Place. A string or single
            element list may be passed in.
language -- The language in which the Place's name is being reported.
            (defaults 'en').
sensor   -- Boolean flag denoting if the location came from a device
            using its location sensor (default False).
['Adds', 'a', 'place', 'to', 'the', 'Google', 'Places', 'database', '.']
train
https://github.com/slimkrazy/python-google-places/blob/d4b7363e1655cdc091a6253379f6d2a95b827881/googleplaces/__init__.py#L498-L565
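A hedged usage sketch (assumes the library is installed; the key and values are placeholders). Note that Google has since retired the add-place endpoint, so treat this as historical:

from googleplaces import GooglePlaces  # assumes python-google-places is installed

places = GooglePlaces('your-api-key')  # placeholder key
result = places.add_place(
    name='Test Cafe',
    lat_lng={'lat': 51.501, 'lng': -0.142},
    accuracy=50,
    types='cafe')
print(result['place_id'], result['id'])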
748
googlefonts/glyphsLib
Lib/glyphsLib/builder/axes.py
AxisDefinition.set_user_loc
def set_user_loc(self, master_or_instance, value):
    """Set the user location of a Glyphs master or instance."""
    if hasattr(master_or_instance, "instanceInterpolations"):
        # The following code is only valid for instances.
        # Masters also have the keys `weight` and `width` but they should
        # not be used, they are deprecated and should only be used to store
        # (parts of) the master's name, but not its location.
        # Try to set the key if possible, i.e. if there is a key, and
        # if there exists a code that can represent the given value, e.g.
        # for "weight": 600 can be represented by SemiBold so we use that,
        # but for 550 there is no code so we will have to set the custom
        # parameter as well.
        if self.user_loc_key is not None and hasattr(
            master_or_instance, self.user_loc_key
        ):
            code = user_loc_value_to_instance_string(self.tag, value)
            value_for_code = user_loc_string_to_value(self.tag, code)
            setattr(master_or_instance, self.user_loc_key, code)
            if self.user_loc_param is not None and value != value_for_code:
                try:
                    class_ = user_loc_value_to_class(self.tag, value)
                    master_or_instance.customParameters[
                        self.user_loc_param
                    ] = class_
                except NotImplementedError:
                    # user_loc_value_to_class only works for weight & width
                    pass
        return
    # For masters, set directly the custom parameter (old way)
    # and also the Axis Location (new way).
    # Only masters can have an 'Axis Location' parameter.
    if self.user_loc_param is not None:
        try:
            class_ = user_loc_value_to_class(self.tag, value)
            master_or_instance.customParameters[self.user_loc_param] = class_
        except NotImplementedError:
            pass
    loc_param = master_or_instance.customParameters["Axis Location"]
    if loc_param is None:
        loc_param = []
        master_or_instance.customParameters["Axis Location"] = loc_param
    location = None
    for loc in loc_param:
        if loc.get("Axis") == self.name:
            location = loc
    if location is None:
        loc_param.append({"Axis": self.name, "Location": value})
    else:
        location["Location"] = value
python
def set_user_loc(self, master_or_instance, value):
    """Set the user location of a Glyphs master or instance."""
    if hasattr(master_or_instance, "instanceInterpolations"):
        # The following code is only valid for instances.
        # Masters also have the keys `weight` and `width` but they should
        # not be used, they are deprecated and should only be used to store
        # (parts of) the master's name, but not its location.
        # Try to set the key if possible, i.e. if there is a key, and
        # if there exists a code that can represent the given value, e.g.
        # for "weight": 600 can be represented by SemiBold so we use that,
        # but for 550 there is no code so we will have to set the custom
        # parameter as well.
        if self.user_loc_key is not None and hasattr(
            master_or_instance, self.user_loc_key
        ):
            code = user_loc_value_to_instance_string(self.tag, value)
            value_for_code = user_loc_string_to_value(self.tag, code)
            setattr(master_or_instance, self.user_loc_key, code)
            if self.user_loc_param is not None and value != value_for_code:
                try:
                    class_ = user_loc_value_to_class(self.tag, value)
                    master_or_instance.customParameters[
                        self.user_loc_param
                    ] = class_
                except NotImplementedError:
                    # user_loc_value_to_class only works for weight & width
                    pass
        return
    # For masters, set directly the custom parameter (old way)
    # and also the Axis Location (new way).
    # Only masters can have an 'Axis Location' parameter.
    if self.user_loc_param is not None:
        try:
            class_ = user_loc_value_to_class(self.tag, value)
            master_or_instance.customParameters[self.user_loc_param] = class_
        except NotImplementedError:
            pass
    loc_param = master_or_instance.customParameters["Axis Location"]
    if loc_param is None:
        loc_param = []
        master_or_instance.customParameters["Axis Location"] = loc_param
    location = None
    for loc in loc_param:
        if loc.get("Axis") == self.name:
            location = loc
    if location is None:
        loc_param.append({"Axis": self.name, "Location": value})
    else:
        location["Location"] = value
['def', 'set_user_loc', '(', 'self', ',', 'master_or_instance', ',', 'value', ')', ':', 'if', 'hasattr', '(', 'master_or_instance', ',', '"instanceInterpolations"', ')', ':', '# The following code is only valid for instances.', '# Masters also the keys `weight` and `width` but they should not be', '# used, they are deprecated and should only be used to store', "# (parts of) the master's name, but not its location.", '# Try to set the key if possible, i.e. if there is a key, and', '# if there exists a code that can represent the given value, e.g.', '# for "weight": 600 can be represented by SemiBold so we use that,', '# but for 550 there is no code so we will have to set the custom', '# parameter as well.', 'if', 'self', '.', 'user_loc_key', 'is', 'not', 'None', 'and', 'hasattr', '(', 'master_or_instance', ',', 'self', '.', 'user_loc_key', ')', ':', 'code', '=', 'user_loc_value_to_instance_string', '(', 'self', '.', 'tag', ',', 'value', ')', 'value_for_code', '=', 'user_loc_string_to_value', '(', 'self', '.', 'tag', ',', 'code', ')', 'setattr', '(', 'master_or_instance', ',', 'self', '.', 'user_loc_key', ',', 'code', ')', 'if', 'self', '.', 'user_loc_param', 'is', 'not', 'None', 'and', 'value', '!=', 'value_for_code', ':', 'try', ':', 'class_', '=', 'user_loc_value_to_class', '(', 'self', '.', 'tag', ',', 'value', ')', 'master_or_instance', '.', 'customParameters', '[', 'self', '.', 'user_loc_param', ']', '=', 'class_', 'except', 'NotImplementedError', ':', '# user_loc_value_to_class only works for weight & width', 'pass', 'return', '# For masters, set directly the custom parameter (old way)', '# and also the Axis Location (new way).', "# Only masters can have an 'Axis Location' parameter.", 'if', 'self', '.', 'user_loc_param', 'is', 'not', 'None', ':', 'try', ':', 'class_', '=', 'user_loc_value_to_class', '(', 'self', '.', 'tag', ',', 'value', ')', 'master_or_instance', '.', 'customParameters', '[', 'self', '.', 'user_loc_param', ']', '=', 'class_', 'except', 'NotImplementedError', ':', 'pass', 'loc_param', '=', 'master_or_instance', '.', 'customParameters', '[', '"Axis Location"', ']', 'if', 'loc_param', 'is', 'None', ':', 'loc_param', '=', '[', ']', 'master_or_instance', '.', 'customParameters', '[', '"Axis Location"', ']', '=', 'loc_param', 'location', '=', 'None', 'for', 'loc', 'in', 'loc_param', ':', 'if', 'loc', '.', 'get', '(', '"Axis"', ')', '==', 'self', '.', 'name', ':', 'location', '=', 'loc', 'if', 'location', 'is', 'None', ':', 'loc_param', '.', 'append', '(', '{', '"Axis"', ':', 'self', '.', 'name', ',', '"Location"', ':', 'value', '}', ')', 'else', ':', 'location', '[', '"Location"', ']', '=', 'value']
Set the user location of a Glyphs master or instance.
['Set', 'the', 'user', 'location', 'of', 'a', 'Glyphs', 'master', 'or', 'instance', '.']
train
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/axes.py#L367-L418
749
Alignak-monitoring/alignak
alignak/daemons/receiverdaemon.py
Receiver.get_daemon_stats
def get_daemon_stats(self, details=False):
    """Increase the stats provided by the Daemon base class

    :return: stats dictionary
    :rtype: dict
    """
    # Call the base Daemon one
    res = super(Receiver, self).get_daemon_stats(details=details)
    res.update({'name': self.name, 'type': self.type})
    counters = res['counters']
    counters['external-commands'] = len(self.external_commands)
    counters['external-commands-unprocessed'] = len(self.unprocessed_external_commands)
    return res
python
def get_daemon_stats(self, details=False):
    """Increase the stats provided by the Daemon base class

    :return: stats dictionary
    :rtype: dict
    """
    # Call the base Daemon one
    res = super(Receiver, self).get_daemon_stats(details=details)
    res.update({'name': self.name, 'type': self.type})
    counters = res['counters']
    counters['external-commands'] = len(self.external_commands)
    counters['external-commands-unprocessed'] = len(self.unprocessed_external_commands)
    return res
['def', 'get_daemon_stats', '(', 'self', ',', 'details', '=', 'False', ')', ':', '# Call the base Daemon one', 'res', '=', 'super', '(', 'Receiver', ',', 'self', ')', '.', 'get_daemon_stats', '(', 'details', '=', 'details', ')', 'res', '.', 'update', '(', '{', "'name'", ':', 'self', '.', 'name', ',', "'type'", ':', 'self', '.', 'type', '}', ')', 'counters', '=', 'res', '[', "'counters'", ']', 'counters', '[', "'external-commands'", ']', '=', 'len', '(', 'self', '.', 'external_commands', ')', 'counters', '[', "'external-commands-unprocessed'", ']', '=', 'len', '(', 'self', '.', 'unprocessed_external_commands', ')', 'return', 'res']
Increase the stats provided by the Daemon base class

:return: stats dictionary
:rtype: dict
['Increase', 'the', 'stats', 'provided', 'by', 'the', 'Daemon', 'base', 'class']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/daemons/receiverdaemon.py#L343-L358
750
projecthamster/hamster
src/hamster/lib/stuff.py
totals
def totals(iter, keyfunc, sumfunc):
    """groups items by field described in keyfunc and counts totals using value
       from sumfunc
    """
    data = sorted(iter, key=keyfunc)
    res = {}
    for k, group in groupby(data, keyfunc):
        res[k] = sum([sumfunc(entry) for entry in group])
    return res
python
def totals(iter, keyfunc, sumfunc):
    """groups items by field described in keyfunc and counts totals using value
       from sumfunc
    """
    data = sorted(iter, key=keyfunc)
    res = {}
    for k, group in groupby(data, keyfunc):
        res[k] = sum([sumfunc(entry) for entry in group])
    return res
['def', 'totals', '(', 'iter', ',', 'keyfunc', ',', 'sumfunc', ')', ':', 'data', '=', 'sorted', '(', 'iter', ',', 'key', '=', 'keyfunc', ')', 'res', '=', '{', '}', 'for', 'k', ',', 'group', 'in', 'groupby', '(', 'data', ',', 'keyfunc', ')', ':', 'res', '[', 'k', ']', '=', 'sum', '(', '[', 'sumfunc', '(', 'entry', ')', 'for', 'entry', 'in', 'group', ']', ')', 'return', 'res']
groups items by field described in keyfunc and counts totals using value from sumfunc
['groups', 'items', 'by', 'field', 'described', 'in', 'keyfunc', 'and', 'counts', 'totals', 'using', 'value', 'from', 'sumfunc']
train
https://github.com/projecthamster/hamster/blob/ca5254eff53172796ddafc72226c394ed1858245/src/hamster/lib/stuff.py#L224-L234
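A runnable illustration of the grouping pattern above (sort first, then itertools.groupby), using made-up activity records rather than hamster facts:

from itertools import groupby

facts = [('work', 30), ('break', 5), ('work', 45), ('break', 10)]
keyfunc = lambda fact: fact[0]
sumfunc = lambda fact: fact[1]
# groupby only merges adjacent keys, hence the sort.
data = sorted(facts, key=keyfunc)
res = {k: sum(sumfunc(entry) for entry in group)
       for k, group in groupby(data, keyfunc)}
print(res)  # {'break': 15, 'work': 75}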
751
tcalmant/python-javaobj
javaobj/core.py
JavaObjectMarshaller._writeString
def _writeString(self, obj, use_reference=True):
    """
    Appends a string to the serialization stream

    :param obj: String to serialize
    :param use_reference: If True, allow writing a reference
    """
    # TODO: Convert to "modified UTF-8"
    # http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8
    string = to_bytes(obj, "utf-8")
    if use_reference and isinstance(obj, JavaString):
        try:
            idx = self.references.index(obj)
        except ValueError:
            # First appearance of the string
            self.references.append(obj)
            logging.debug(
                "*** Adding ref 0x%X for string: %s",
                len(self.references) - 1 + self.BASE_REFERENCE_IDX,
                obj,
            )
            self._writeStruct(">H", 2, (len(string),))
            self.object_stream.write(string)
        else:
            # Write a reference to the previous type
            logging.debug(
                "*** Reusing ref 0x%X for string: %s",
                idx + self.BASE_REFERENCE_IDX,
                obj,
            )
            self.write_reference(idx)
    else:
        self._writeStruct(">H", 2, (len(string),))
        self.object_stream.write(string)
python
def _writeString(self, obj, use_reference=True):
    """
    Appends a string to the serialization stream

    :param obj: String to serialize
    :param use_reference: If True, allow writing a reference
    """
    # TODO: Convert to "modified UTF-8"
    # http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8
    string = to_bytes(obj, "utf-8")
    if use_reference and isinstance(obj, JavaString):
        try:
            idx = self.references.index(obj)
        except ValueError:
            # First appearance of the string
            self.references.append(obj)
            logging.debug(
                "*** Adding ref 0x%X for string: %s",
                len(self.references) - 1 + self.BASE_REFERENCE_IDX,
                obj,
            )
            self._writeStruct(">H", 2, (len(string),))
            self.object_stream.write(string)
        else:
            # Write a reference to the previous type
            logging.debug(
                "*** Reusing ref 0x%X for string: %s",
                idx + self.BASE_REFERENCE_IDX,
                obj,
            )
            self.write_reference(idx)
    else:
        self._writeStruct(">H", 2, (len(string),))
        self.object_stream.write(string)
['def', '_writeString', '(', 'self', ',', 'obj', ',', 'use_reference', '=', 'True', ')', ':', '# TODO: Convert to "modified UTF-8"', '# http://docs.oracle.com/javase/7/docs/api/java/io/DataInput.html#modified-utf-8', 'string', '=', 'to_bytes', '(', 'obj', ',', '"utf-8"', ')', 'if', 'use_reference', 'and', 'isinstance', '(', 'obj', ',', 'JavaString', ')', ':', 'try', ':', 'idx', '=', 'self', '.', 'references', '.', 'index', '(', 'obj', ')', 'except', 'ValueError', ':', '# First appearance of the string', 'self', '.', 'references', '.', 'append', '(', 'obj', ')', 'logging', '.', 'debug', '(', '"*** Adding ref 0x%X for string: %s"', ',', 'len', '(', 'self', '.', 'references', ')', '-', '1', '+', 'self', '.', 'BASE_REFERENCE_IDX', ',', 'obj', ',', ')', 'self', '.', '_writeStruct', '(', '">H"', ',', '2', ',', '(', 'len', '(', 'string', ')', ',', ')', ')', 'self', '.', 'object_stream', '.', 'write', '(', 'string', ')', 'else', ':', '# Write a reference to the previous type', 'logging', '.', 'debug', '(', '"*** Reusing ref 0x%X for string: %s"', ',', 'idx', '+', 'self', '.', 'BASE_REFERENCE_IDX', ',', 'obj', ',', ')', 'self', '.', 'write_reference', '(', 'idx', ')', 'else', ':', 'self', '.', '_writeStruct', '(', '">H"', ',', '2', ',', '(', 'len', '(', 'string', ')', ',', ')', ')', 'self', '.', 'object_stream', '.', 'write', '(', 'string', ')']
Appends a string to the serialization stream

:param obj: String to serialize
:param use_reference: If True, allow writing a reference
['Appends', 'a', 'string', 'to', 'the', 'serialization', 'stream']
train
https://github.com/tcalmant/python-javaobj/blob/e042c2cbf1ce9de659b6cb9290b5ccd5442514d1/javaobj/core.py#L1293-L1328
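The wire format written above is a big-endian unsigned-short byte count followed by the encoded bytes. A minimal, runnable sketch of that convention with struct alone (function name is hypothetical; as the TODO above notes, real Java serialization uses "modified UTF-8", which differs from plain UTF-8 for NUL and supplementary characters):

import io
import struct

def write_java_utf(stream, text):
    # Big-endian unsigned short byte count, then the encoded bytes.
    data = text.encode('utf-8')
    stream.write(struct.pack('>H', len(data)))
    stream.write(data)

buf = io.BytesIO()
write_java_utf(buf, 'héllo')
print(buf.getvalue())  # b'\x00\x06h\xc3\xa9llo'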
752
edoburu/django-private-storage
private_storage/views.py
PrivateStorageView.get_private_file
def get_private_file(self):
    """
    Return all relevant data in a single object, so this is easy to extend
    and server implementations can pick what they need.
    """
    return PrivateFile(
        request=self.request,
        storage=self.get_storage(),
        relative_name=self.get_path()
    )
python
def get_private_file(self):
    """
    Return all relevant data in a single object, so this is easy to extend
    and server implementations can pick what they need.
    """
    return PrivateFile(
        request=self.request,
        storage=self.get_storage(),
        relative_name=self.get_path()
    )
['def', 'get_private_file', '(', 'self', ')', ':', 'return', 'PrivateFile', '(', 'request', '=', 'self', '.', 'request', ',', 'storage', '=', 'self', '.', 'get_storage', '(', ')', ',', 'relative_name', '=', 'self', '.', 'get_path', '(', ')', ')']
Return all relevant data in a single object, so this is easy to extend and server implementations can pick what they need.
['Return', 'all', 'relevant', 'data', 'in', 'a', 'single', 'object', 'so', 'this', 'is', 'easy', 'to', 'extend', 'and', 'server', 'implementations', 'can', 'pick', 'what', 'they', 'need', '.']
train
https://github.com/edoburu/django-private-storage/blob/35b718024fee75b0ed3400f601976b20246c7d05/private_storage/views.py#L55-L64
753
bitesofcode/projexui
projexui/widgets/xorbcolumnnavigator.py
XOrbColumnItem.setCurrentSchemaColumn
def setCurrentSchemaColumn(self, column):
    """
    Sets the current item based on the inputted column.

    :param      column | <orb.Column> || None
    """
    if column == self._column:
        self.treeWidget().setCurrentItem(self)
        return True
    for c in range(self.childCount()):
        if self.child(c).setCurrentSchemaColumn(column):
            self.setExpanded(True)
            return True
    return None
python
def setCurrentSchemaColumn(self, column):
    """
    Sets the current item based on the inputted column.

    :param      column | <orb.Column> || None
    """
    if column == self._column:
        self.treeWidget().setCurrentItem(self)
        return True
    for c in range(self.childCount()):
        if self.child(c).setCurrentSchemaColumn(column):
            self.setExpanded(True)
            return True
    return None
['def', 'setCurrentSchemaColumn', '(', 'self', ',', 'column', ')', ':', 'if', 'column', '==', 'self', '.', '_column', ':', 'self', '.', 'treeWidget', '(', ')', '.', 'setCurrentItem', '(', 'self', ')', 'return', 'True', 'for', 'c', 'in', 'range', '(', 'self', '.', 'childCount', '(', ')', ')', ':', 'if', 'self', '.', 'child', '(', 'c', ')', '.', 'setCurrentSchemaColumn', '(', 'column', ')', ':', 'self', '.', 'setExpanded', '(', 'True', ')', 'return', 'True', 'return', 'None']
Sets the current item based on the inputted column.

:param      column | <orb.Column> || None
['Sets', 'the', 'current', 'item', 'based', 'on', 'the', 'inputed', 'column', '.', ':', 'param', 'column', '|', '<orb', '.', 'Column', '>', '||', 'None']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbcolumnnavigator.py#L80-L94
754
RJT1990/pyflux
pyflux/families/exponential.py
Exponential.markov_blanket
def markov_blanket(y, mean, scale, shape, skewness):
    """ Markov blanket for the Exponential distribution

    Parameters
    ----------
    y : np.ndarray
        univariate time series
    mean : np.ndarray
        array of location parameters for the Exponential distribution
    scale : float
        scale parameter for the Exponential distribution
    shape : float
        tail thickness parameter for the Exponential distribution
    skewness : float
        skewness parameter for the Exponential distribution

    Returns
    ----------
    - Markov blanket of the Exponential family
    """
    return ss.expon.logpdf(x=y, scale=1/mean)
python
def markov_blanket(y, mean, scale, shape, skewness):
    """ Markov blanket for the Exponential distribution

    Parameters
    ----------
    y : np.ndarray
        univariate time series
    mean : np.ndarray
        array of location parameters for the Exponential distribution
    scale : float
        scale parameter for the Exponential distribution
    shape : float
        tail thickness parameter for the Exponential distribution
    skewness : float
        skewness parameter for the Exponential distribution

    Returns
    ----------
    - Markov blanket of the Exponential family
    """
    return ss.expon.logpdf(x=y, scale=1/mean)
['def', 'markov_blanket', '(', 'y', ',', 'mean', ',', 'scale', ',', 'shape', ',', 'skewness', ')', ':', 'return', 'ss', '.', 'expon', '.', 'logpdf', '(', 'x', '=', 'y', ',', 'scale', '=', '1', '/', 'mean', ')']
Markov blanket for the Exponential distribution

Parameters
----------
y : np.ndarray
    univariate time series
mean : np.ndarray
    array of location parameters for the Exponential distribution
scale : float
    scale parameter for the Exponential distribution
shape : float
    tail thickness parameter for the Exponential distribution
skewness : float
    skewness parameter for the Exponential distribution

Returns
----------
- Markov blanket of the Exponential family
['Markov', 'blanket', 'for', 'the', 'Exponential', 'distribution']
train
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/families/exponential.py#L195-L219
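On the scale=1/mean trick: scipy's expon is parameterized by a scale (the distribution mean, i.e. 1/rate), so dividing turns the passed array into a rate. A runnable check with made-up values:

import numpy as np
import scipy.stats as ss

y = np.array([0.5, 1.0, 2.0])
rate = np.array([2.0, 2.0, 2.0])
# Exponential logpdf = log(rate) - rate * y
print(ss.expon.logpdf(x=y, scale=1/rate))  # approx [-0.307, -1.307, -3.307]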
755
oasiswork/zimsoap
zimsoap/client.py
ZimbraAdminClient.add_account_alias
def add_account_alias(self, account, alias):
    """
    :param account: an account object to be used as a selector
    :param alias:   email alias address
    :returns:       None (the API itself returns nothing)
    """
    self.request('AddAccountAlias', {
        'id': self._get_or_fetch_id(account, self.get_account),
        'alias': alias,
    })
python
def add_account_alias(self, account, alias):
    """
    :param account: an account object to be used as a selector
    :param alias:   email alias address
    :returns:       None (the API itself returns nothing)
    """
    self.request('AddAccountAlias', {
        'id': self._get_or_fetch_id(account, self.get_account),
        'alias': alias,
    })
['def', 'add_account_alias', '(', 'self', ',', 'account', ',', 'alias', ')', ':', 'self', '.', 'request', '(', "'AddAccountAlias'", ',', '{', "'id'", ':', 'self', '.', '_get_or_fetch_id', '(', 'account', ',', 'self', '.', 'get_account', ')', ',', "'alias'", ':', 'alias', ',', '}', ')']
:param account: an account object to be used as a selector
:param alias:   email alias address
:returns:       None (the API itself returns nothing)
[':', 'param', 'account', ':', 'an', 'account', 'object', 'to', 'be', 'used', 'as', 'a', 'selector', ':', 'param', 'alias', ':', 'email', 'alias', 'address', ':', 'returns', ':', 'None', '(', 'the', 'API', 'itself', 'returns', 'nothing', ')']
train
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L1111-L1120
756
CityOfZion/neo-python
neo/Core/Block.py
Block.Serialize
def Serialize(self, writer):
    """
    Serialize full object.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    super(Block, self).Serialize(writer)
    writer.WriteSerializableArray(self.Transactions)
python
def Serialize(self, writer):
    """
    Serialize full object.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    super(Block, self).Serialize(writer)
    writer.WriteSerializableArray(self.Transactions)
['def', 'Serialize', '(', 'self', ',', 'writer', ')', ':', 'super', '(', 'Block', ',', 'self', ')', '.', 'Serialize', '(', 'writer', ')', 'writer', '.', 'WriteSerializableArray', '(', 'self', '.', 'Transactions', ')']
Serialize full object.

Args:
    writer (neo.IO.BinaryWriter):
['Serialize', 'full', 'object', '.']
train
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/Block.py#L262-L270
757
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/reftrack/__init__.py
get_groupname
def get_groupname(taskfileinfo):
    """Return a suitable name for a groupname for the given taskfileinfo.

    :param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
    :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: None
    :rtype: None
    :raises: None
    """
    element = taskfileinfo.task.element
    name = element.name
    return name + "_grp"
python
def get_groupname(taskfileinfo):
    """Return a suitable name for a groupname for the given taskfileinfo.

    :param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
    :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: None
    :rtype: None
    :raises: None
    """
    element = taskfileinfo.task.element
    name = element.name
    return name + "_grp"
['def', 'get_groupname', '(', 'taskfileinfo', ')', ':', 'element', '=', 'taskfileinfo', '.', 'task', '.', 'element', 'name', '=', 'element', '.', 'name', 'return', 'name', '+', '"_grp"']
Return a suitable name for a groupname for the given taskfileinfo.

:param taskfileinfo: the taskfile info for the file that needs a group when importing/referencing
:type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
:returns: None
:rtype: None
:raises: None
['Return', 'a', 'suitable', 'name', 'for', 'a', 'groupname', 'for', 'the', 'given', 'taskfileinfo', '.']
train
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/reftrack/__init__.py#L24-L35
758
bitesofcode/projexui
projexui/widgets/xganttwidget/xganttwidget.py
XGanttWidget.insertTopLevelItem
def insertTopLevelItem(self, index, item):
    """
    Inserts the inputted item at the given index in the tree.

    :param      index | <int>
                item  | <XGanttWidgetItem>
    """
    self.treeWidget().insertTopLevelItem(index, item)
    if self.updatesEnabled():
        try:
            item.sync(recursive=True)
        except AttributeError:
            pass
python
def insertTopLevelItem(self, index, item):
    """
    Inserts the inputted item at the given index in the tree.

    :param      index | <int>
                item  | <XGanttWidgetItem>
    """
    self.treeWidget().insertTopLevelItem(index, item)
    if self.updatesEnabled():
        try:
            item.sync(recursive=True)
        except AttributeError:
            pass
['def', 'insertTopLevelItem', '(', 'self', ',', 'index', ',', 'item', ')', ':', 'self', '.', 'treeWidget', '(', ')', '.', 'insertTopLevelItem', '(', 'index', ',', 'item', ')', 'if', 'self', '.', 'updatesEnabled', '(', ')', ':', 'try', ':', 'item', '.', 'sync', '(', 'recursive', '=', 'True', ')', 'except', 'AttributeError', ':', 'pass']
Inserts the inputted item at the given index in the tree.

:param      index | <int>
            item  | <XGanttWidgetItem>
['Inserts', 'the', 'inputed', 'item', 'at', 'the', 'given', 'index', 'in', 'the', 'tree', '.', ':', 'param', 'index', '|', '<int', '>', 'item', '|', '<XGanttWidgetItem', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xganttwidget/xganttwidget.py#L376-L389
759
brbsix/subnuker
subnuker.py
SrtProject.split
def split(self, text):
    """Split text into a list of cells."""
    import re
    if re.search('\n\n', text):
        return text.split('\n\n')
    elif re.search('\r\n\r\n', text):
        return text.split('\r\n\r\n')
    else:
        LOGGER.error("'%s' does not appear to be a 'srt' subtitle file",
                     self.filename)
        sys.exit(1)
python
def split(self, text):
    """Split text into a list of cells."""
    import re
    if re.search('\n\n', text):
        return text.split('\n\n')
    elif re.search('\r\n\r\n', text):
        return text.split('\r\n\r\n')
    else:
        LOGGER.error("'%s' does not appear to be a 'srt' subtitle file",
                     self.filename)
        sys.exit(1)
['def', 'split', '(', 'self', ',', 'text', ')', ':', 'import', 're', 'if', 're', '.', 'search', '(', "'\\n\\n'", ',', 'text', ')', ':', 'return', 'text', '.', 'split', '(', "'\\n\\n'", ')', 'elif', 're', '.', 'search', '(', "'\\r\\n\\r\\n'", ',', 'text', ')', ':', 'return', 'text', '.', 'split', '(', "'\\r\\n\\r\\n'", ')', 'else', ':', 'LOGGER', '.', 'error', '(', '"\'%s\' does not appear to be a \'srt\' subtitle file"', ',', 'self', '.', 'filename', ')', 'sys', '.', 'exit', '(', '1', ')']
Split text into a list of cells.
['Split', 'text', 'into', 'a', 'list', 'of', 'cells', '.']
train
https://github.com/brbsix/subnuker/blob/a94260a6e84b790a9e39e0b1793443ffd4e1f496/subnuker.py#L320-L331
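The two branches above differ only in the newline convention; a single pattern can cover both. A runnable sketch of that alternative (not from subnuker; the sample text is made up):

import re

text = "1\n00:00:01 --> 00:00:02\nHello\n\n2\n00:00:03 --> 00:00:04\nWorld\n"
# \r?\n\r?\n matches a blank line in both Unix and DOS files.
cells = re.split(r'\r?\n\r?\n', text)
print(len(cells))  # 2, and the same for the \r\n\r\n variant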
760
Scifabric/pbs
helpers.py
_add_helpingmaterials
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project."""
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient, config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(
                        project_id=project.id, info=helping_info,
                        file_path=file_path)
                    check_api_error(hm)
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(
                        project_id=project.id, info=helping_info)
                    check_api_error(response)
                # Check whether we have to auto-throttle creation for this data
                sleep, msg = enable_auto_throttling(
                    config, data, endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" %
                (len(data), config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
python
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project."""
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient, config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(
                        project_id=project.id, info=helping_info,
                        file_path=file_path)
                    check_api_error(hm)
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(
                        project_id=project.id, info=helping_info)
                    check_api_error(response)
                # Check whether we have to auto-throttle creation for this data
                sleep, msg = enable_auto_throttling(
                    config, data, endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" %
                (len(data), config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding"
                % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
['def', '_add_helpingmaterials', '(', 'config', ',', 'helping_file', ',', 'helping_type', ')', ':', 'try', ':', 'project', '=', 'find_project_by_short_name', '(', 'config', '.', 'project', '[', "'short_name'", ']', ',', 'config', '.', 'pbclient', ',', 'config', '.', 'all', ')', 'data', '=', '_load_data', '(', 'helping_file', ',', 'helping_type', ')', 'if', 'len', '(', 'data', ')', '==', '0', ':', 'return', '(', '"Unknown format for the tasks file. Use json, csv, po or "', '"properties."', ')', '# Show progress bar', 'with', 'click', '.', 'progressbar', '(', 'data', ',', 'label', '=', '"Adding Helping Materials"', ')', 'as', 'pgbar', ':', 'for', 'd', 'in', 'pgbar', ':', 'helping_info', ',', 'file_path', '=', 'create_helping_material_info', '(', 'd', ')', 'if', 'file_path', ':', '# Create first the media object', 'hm', '=', 'config', '.', 'pbclient', '.', 'create_helpingmaterial', '(', 'project_id', '=', 'project', '.', 'id', ',', 'info', '=', 'helping_info', ',', 'file_path', '=', 'file_path', ')', 'check_api_error', '(', 'hm', ')', 'z', '=', 'hm', '.', 'info', '.', 'copy', '(', ')', 'z', '.', 'update', '(', 'helping_info', ')', 'hm', '.', 'info', '=', 'z', 'response', '=', 'config', '.', 'pbclient', '.', 'update_helping_material', '(', 'hm', ')', 'check_api_error', '(', 'response', ')', 'else', ':', 'response', '=', 'config', '.', 'pbclient', '.', 'create_helpingmaterial', '(', 'project_id', '=', 'project', '.', 'id', ',', 'info', '=', 'helping_info', ')', 'check_api_error', '(', 'response', ')', '# Check if for the data we have to auto-throttle task creation', 'sleep', ',', 'msg', '=', 'enable_auto_throttling', '(', 'config', ',', 'data', ',', 'endpoint', '=', "'/api/helpinmaterial'", ')', '# If true, warn user', 'if', 'sleep', ':', '# pragma: no cover', 'click', '.', 'secho', '(', 'msg', ',', 'fg', '=', "'yellow'", ')', '# If auto-throttling enabled, sleep for sleep seconds', 'if', 'sleep', ':', '# pragma: no cover', 'time', '.', 'sleep', '(', 'sleep', ')', 'return', '(', '"%s helping materials added to project: %s"', '%', '(', 'len', '(', 'data', ')', ',', 'config', '.', 'project', '[', "'short_name'", ']', ')', ')', 'except', 'exceptions', '.', 'ConnectionError', ':', 'return', '(', '"Connection Error! The server %s is not responding"', '%', 'config', '.', 'server', ')', 'except', '(', 'ProjectNotFound', ',', 'TaskNotFound', ')', ':', 'raise']
Add helping materials to a project.
['Add', 'helping', 'materials', 'to', 'a', 'project', '.']
train
https://github.com/Scifabric/pbs/blob/3e5d5f3f0f5d20f740eaacc4d6e872a0c9fb8b38/helpers.py#L233-L277
761
opendns/pyinvestigate
investigate/investigate.py
Investigate.categorization
def categorization(self, domains, labels=False):
    '''Get the domain status and categorization of a domain or list of domains.
    'domains' can be either a single domain, or a list of domains.
    Setting 'labels' to True will give back categorizations in human-readable
    form.

    For more detail, see https://investigate.umbrella.com/docs/api#categorization
    '''
    if type(domains) is str:
        return self._get_categorization(domains, labels)
    elif type(domains) is list:
        return self._post_categorization(domains, labels)
    else:
        raise Investigate.DOMAIN_ERR
python
def categorization(self, domains, labels=False):
    '''Get the domain status and categorization of a domain or list of domains.
    'domains' can be either a single domain, or a list of domains.
    Setting 'labels' to True will give back categorizations in human-readable
    form.

    For more detail, see https://investigate.umbrella.com/docs/api#categorization
    '''
    if type(domains) is str:
        return self._get_categorization(domains, labels)
    elif type(domains) is list:
        return self._post_categorization(domains, labels)
    else:
        raise Investigate.DOMAIN_ERR
['def', 'categorization', '(', 'self', ',', 'domains', ',', 'labels', '=', 'False', ')', ':', 'if', 'type', '(', 'domains', ')', 'is', 'str', ':', 'return', 'self', '.', '_get_categorization', '(', 'domains', ',', 'labels', ')', 'elif', 'type', '(', 'domains', ')', 'is', 'list', ':', 'return', 'self', '.', '_post_categorization', '(', 'domains', ',', 'labels', ')', 'else', ':', 'raise', 'Investigate', '.', 'DOMAIN_ERR']
Get the domain status and categorization of a domain or list of domains. 'domains' can be either a single domain, or a list of domains. Setting 'labels' to True will give back categorizations in human-readable form. For more detail, see https://investigate.umbrella.com/docs/api#categorization
['Get', 'the', 'domain', 'status', 'and', 'categorization', 'of', 'a', 'domain', 'or', 'list', 'of', 'domains', '.', 'domains', 'can', 'be', 'either', 'a', 'single', 'domain', 'or', 'a', 'list', 'of', 'domains', '.', 'Setting', 'labels', 'to', 'True', 'will', 'give', 'back', 'categorizations', 'in', 'human', '-', 'readable', 'form', '.']
train
https://github.com/opendns/pyinvestigate/blob/a182e73a750f03e906d9b25842d556db8d2fd54f/investigate/investigate.py#L108-L121
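A minimal usage sketch for the categorization call above; the client construction and API key are assumptions, not taken from the record:

import investigate  # pyinvestigate package, assumed importable

inv = investigate.Investigate('YOUR-API-KEY')  # hypothetical key

# A single domain string goes through the GET branch.
print(inv.categorization('example.com', labels=True))

# A list of domains goes through the POST branch.
print(inv.categorization(['example.com', 'example.org'], labels=True))

# Any other argument type raises Investigate.DOMAIN_ERR.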
762
dropbox/pyannotate
pyannotate_tools/annotations/parse.py
tokenize
def tokenize(s): # type: (str) -> List[Token] """Translate a type comment into a list of tokens.""" original = s tokens = [] # type: List[Token] while True: if not s: tokens.append(End()) return tokens elif s[0] == ' ': s = s[1:] elif s[0] in '()[],*': tokens.append(Separator(s[0])) s = s[1:] elif s[:2] == '->': tokens.append(Separator('->')) s = s[2:] else: m = re.match(r'[-\w]+(\s*(\.|:)\s*[-/\w]*)*', s) if not m: raise ParseError(original) fullname = m.group(0) fullname = fullname.replace(' ', '') if fullname in TYPE_FIXUPS: fullname = TYPE_FIXUPS[fullname] # pytz creates classes with the name of the timezone being used: # https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123 # This causes pyannotates to crash as it's invalid to have a class # name with a `/` in it (e.g. "pytz.tzfile.America/Los_Angeles") if fullname.startswith('pytz.tzfile.'): fullname = 'datetime.tzinfo' if '-' in fullname or '/' in fullname: # Not a valid Python name; there are many places that # generate these, so we just substitute Any rather # than crashing. fullname = 'Any' tokens.append(DottedName(fullname)) s = s[len(m.group(0)):]
python
def tokenize(s): # type: (str) -> List[Token] """Translate a type comment into a list of tokens.""" original = s tokens = [] # type: List[Token] while True: if not s: tokens.append(End()) return tokens elif s[0] == ' ': s = s[1:] elif s[0] in '()[],*': tokens.append(Separator(s[0])) s = s[1:] elif s[:2] == '->': tokens.append(Separator('->')) s = s[2:] else: m = re.match(r'[-\w]+(\s*(\.|:)\s*[-/\w]*)*', s) if not m: raise ParseError(original) fullname = m.group(0) fullname = fullname.replace(' ', '') if fullname in TYPE_FIXUPS: fullname = TYPE_FIXUPS[fullname] # pytz creates classes with the name of the timezone being used: # https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123 # This causes pyannotates to crash as it's invalid to have a class # name with a `/` in it (e.g. "pytz.tzfile.America/Los_Angeles") if fullname.startswith('pytz.tzfile.'): fullname = 'datetime.tzinfo' if '-' in fullname or '/' in fullname: # Not a valid Python name; there are many places that # generate these, so we just substitute Any rather # than crashing. fullname = 'Any' tokens.append(DottedName(fullname)) s = s[len(m.group(0)):]
['def', 'tokenize', '(', 's', ')', ':', '# type: (str) -> List[Token]', 'original', '=', 's', 'tokens', '=', '[', ']', '# type: List[Token]', 'while', 'True', ':', 'if', 'not', 's', ':', 'tokens', '.', 'append', '(', 'End', '(', ')', ')', 'return', 'tokens', 'elif', 's', '[', '0', ']', '==', "' '", ':', 's', '=', 's', '[', '1', ':', ']', 'elif', 's', '[', '0', ']', 'in', "'()[],*'", ':', 'tokens', '.', 'append', '(', 'Separator', '(', 's', '[', '0', ']', ')', ')', 's', '=', 's', '[', '1', ':', ']', 'elif', 's', '[', ':', '2', ']', '==', "'->'", ':', 'tokens', '.', 'append', '(', 'Separator', '(', "'->'", ')', ')', 's', '=', 's', '[', '2', ':', ']', 'else', ':', 'm', '=', 're', '.', 'match', '(', "r'[-\\w]+(\\s*(\\.|:)\\s*[-/\\w]*)*'", ',', 's', ')', 'if', 'not', 'm', ':', 'raise', 'ParseError', '(', 'original', ')', 'fullname', '=', 'm', '.', 'group', '(', '0', ')', 'fullname', '=', 'fullname', '.', 'replace', '(', "' '", ',', "''", ')', 'if', 'fullname', 'in', 'TYPE_FIXUPS', ':', 'fullname', '=', 'TYPE_FIXUPS', '[', 'fullname', ']', '# pytz creates classes with the name of the timezone being used:', '# https://github.com/stub42/pytz/blob/f55399cddbef67c56db1b83e0939ecc1e276cf42/src/pytz/tzfile.py#L120-L123', "# This causes pyannotates to crash as it's invalid to have a class", '# name with a `/` in it (e.g. "pytz.tzfile.America/Los_Angeles")', 'if', 'fullname', '.', 'startswith', '(', "'pytz.tzfile.'", ')', ':', 'fullname', '=', "'datetime.tzinfo'", 'if', "'-'", 'in', 'fullname', 'or', "'/'", 'in', 'fullname', ':', '# Not a valid Python name; there are many places that', '# generate these, so we just substitute Any rather', '# than crashing.', 'fullname', '=', "'Any'", 'tokens', '.', 'append', '(', 'DottedName', '(', 'fullname', ')', ')', 's', '=', 's', '[', 'len', '(', 'm', '.', 'group', '(', '0', ')', ')', ':', ']']
Translate a type comment into a list of tokens.
['Translate', 'a', 'type', 'comment', 'into', 'a', 'list', 'of', 'tokens', '.']
train
https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_tools/annotations/parse.py#L173-L210
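To make the tokenizer concrete, here is a small sketch of what it should yield for a simple type comment, following the rules in the record (the import path is taken from the record; the exact token reprs are assumptions):

from pyannotate_tools.annotations.parse import tokenize

# '(int, str) -> bool' splits into separators, dotted names, and End:
# Separator('('), DottedName('int'), Separator(','), DottedName('str'),
# Separator(')'), Separator('->'), DottedName('bool'), End()
for token in tokenize('(int, str) -> bool'):
    print(token)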
763
TUNE-Archive/freight_forwarder
freight_forwarder/config.py
Config._scheme_propagation
def _scheme_propagation(self, scheme, definitions): """ Will updated a scheme based on inheritance. This is defined in a scheme objects with ``'inherit': '$definition'``. Will also updated parent objects for nested inheritance. Usage:: >>> SCHEME = { >>> 'thing1': { >>> 'inherit': '$thing2' >>> }, >>> '_': { >>> 'thing2': { >>> 'this_is': 'thing2 is a definition' >>> } >>> } >>> } >>> scheme = SCHEME.get('thing1') >>> if 'inherit' in scheme: >>> scheme = self._scheme_propagation(scheme, SCHEME.get('_')) >>> >>> scheme.get('some_data') :param scheme: A dict, should be a scheme defining validation. :param definitions: A dict, should be defined in the scheme using '_'. :rtype: A :dict: will return a updated copy of the scheme. """ if not isinstance(scheme, dict): raise TypeError('scheme must be a dict to propagate.') inherit_from = scheme.get('inherit') if isinstance(inherit_from, six.string_types): if not inherit_from.startswith('$'): raise AttributeError('When inheriting from an object it must start with a $.') if inherit_from.count('$') > 1: raise AttributeError('When inheriting an object it can only have one $.') if not isinstance(definitions, dict): raise AttributeError("Must define definitions in the root of the SCHEME. " "It is done so with '_': { objs }.") name = inherit_from[1:] definition = definitions.copy().get(name) if not definition: raise LookupError( 'Was unable to find {0} in definitions. The follow are available: {1}.'.format(name, definitions) ) else: raise AttributeError('inherit must be defined in your scheme and be a string value. format: $variable.') updated_scheme = {key: value for key, value in six.iteritems(scheme) if key not in definition} nested_scheme = None for key, value in six.iteritems(definition): if key in scheme: updated_scheme[key] = scheme[key] else: updated_scheme[key] = value if key == 'inherit': nested_scheme = self._scheme_propagation(definition, definitions) # remove inherit key if 'inherit' in updated_scheme: del updated_scheme['inherit'] if nested_scheme is not None: updated_scheme.update(nested_scheme) return updated_scheme
python
def _scheme_propagation(self, scheme, definitions): """ Will updated a scheme based on inheritance. This is defined in a scheme objects with ``'inherit': '$definition'``. Will also updated parent objects for nested inheritance. Usage:: >>> SCHEME = { >>> 'thing1': { >>> 'inherit': '$thing2' >>> }, >>> '_': { >>> 'thing2': { >>> 'this_is': 'thing2 is a definition' >>> } >>> } >>> } >>> scheme = SCHEME.get('thing1') >>> if 'inherit' in scheme: >>> scheme = self._scheme_propagation(scheme, SCHEME.get('_')) >>> >>> scheme.get('some_data') :param scheme: A dict, should be a scheme defining validation. :param definitions: A dict, should be defined in the scheme using '_'. :rtype: A :dict: will return a updated copy of the scheme. """ if not isinstance(scheme, dict): raise TypeError('scheme must be a dict to propagate.') inherit_from = scheme.get('inherit') if isinstance(inherit_from, six.string_types): if not inherit_from.startswith('$'): raise AttributeError('When inheriting from an object it must start with a $.') if inherit_from.count('$') > 1: raise AttributeError('When inheriting an object it can only have one $.') if not isinstance(definitions, dict): raise AttributeError("Must define definitions in the root of the SCHEME. " "It is done so with '_': { objs }.") name = inherit_from[1:] definition = definitions.copy().get(name) if not definition: raise LookupError( 'Was unable to find {0} in definitions. The follow are available: {1}.'.format(name, definitions) ) else: raise AttributeError('inherit must be defined in your scheme and be a string value. format: $variable.') updated_scheme = {key: value for key, value in six.iteritems(scheme) if key not in definition} nested_scheme = None for key, value in six.iteritems(definition): if key in scheme: updated_scheme[key] = scheme[key] else: updated_scheme[key] = value if key == 'inherit': nested_scheme = self._scheme_propagation(definition, definitions) # remove inherit key if 'inherit' in updated_scheme: del updated_scheme['inherit'] if nested_scheme is not None: updated_scheme.update(nested_scheme) return updated_scheme
['def', '_scheme_propagation', '(', 'self', ',', 'scheme', ',', 'definitions', ')', ':', 'if', 'not', 'isinstance', '(', 'scheme', ',', 'dict', ')', ':', 'raise', 'TypeError', '(', "'scheme must be a dict to propagate.'", ')', 'inherit_from', '=', 'scheme', '.', 'get', '(', "'inherit'", ')', 'if', 'isinstance', '(', 'inherit_from', ',', 'six', '.', 'string_types', ')', ':', 'if', 'not', 'inherit_from', '.', 'startswith', '(', "'$'", ')', ':', 'raise', 'AttributeError', '(', "'When inheriting from an object it must start with a $.'", ')', 'if', 'inherit_from', '.', 'count', '(', "'$'", ')', '>', '1', ':', 'raise', 'AttributeError', '(', "'When inheriting an object it can only have one $.'", ')', 'if', 'not', 'isinstance', '(', 'definitions', ',', 'dict', ')', ':', 'raise', 'AttributeError', '(', '"Must define definitions in the root of the SCHEME. "', '"It is done so with \'_\': { objs }."', ')', 'name', '=', 'inherit_from', '[', '1', ':', ']', 'definition', '=', 'definitions', '.', 'copy', '(', ')', '.', 'get', '(', 'name', ')', 'if', 'not', 'definition', ':', 'raise', 'LookupError', '(', "'Was unable to find {0} in definitions. The follow are available: {1}.'", '.', 'format', '(', 'name', ',', 'definitions', ')', ')', 'else', ':', 'raise', 'AttributeError', '(', "'inherit must be defined in your scheme and be a string value. format: $variable.'", ')', 'updated_scheme', '=', '{', 'key', ':', 'value', 'for', 'key', ',', 'value', 'in', 'six', '.', 'iteritems', '(', 'scheme', ')', 'if', 'key', 'not', 'in', 'definition', '}', 'nested_scheme', '=', 'None', 'for', 'key', ',', 'value', 'in', 'six', '.', 'iteritems', '(', 'definition', ')', ':', 'if', 'key', 'in', 'scheme', ':', 'updated_scheme', '[', 'key', ']', '=', 'scheme', '[', 'key', ']', 'else', ':', 'updated_scheme', '[', 'key', ']', '=', 'value', 'if', 'key', '==', "'inherit'", ':', 'nested_scheme', '=', 'self', '.', '_scheme_propagation', '(', 'definition', ',', 'definitions', ')', '# remove inherit key', 'if', "'inherit'", 'in', 'updated_scheme', ':', 'del', 'updated_scheme', '[', "'inherit'", ']', 'if', 'nested_scheme', 'is', 'not', 'None', ':', 'updated_scheme', '.', 'update', '(', 'nested_scheme', ')', 'return', 'updated_scheme']
Will update a scheme based on inheritance. This is defined in scheme objects with ``'inherit': '$definition'``. Will also update parent objects for nested inheritance. Usage:: >>> SCHEME = { >>> 'thing1': { >>> 'inherit': '$thing2' >>> }, >>> '_': { >>> 'thing2': { >>> 'this_is': 'thing2 is a definition' >>> } >>> } >>> } >>> scheme = SCHEME.get('thing1') >>> if 'inherit' in scheme: >>> scheme = self._scheme_propagation(scheme, SCHEME.get('_')) >>> >>> scheme.get('some_data') :param scheme: A dict, should be a scheme defining validation. :param definitions: A dict, should be defined in the scheme using '_'. :rtype: A :dict: will return an updated copy of the scheme.
['Will', 'update', 'a', 'scheme', 'based', 'on', 'inheritance', '.', 'This', 'is', 'defined', 'in', 'scheme', 'objects', 'with', 'inherit', ':', '$definition', '.', 'Will', 'also', 'update', 'parent', 'objects', 'for', 'nested', 'inheritance', '.']
train
https://github.com/TUNE-Archive/freight_forwarder/blob/6ea4a49f474ec04abb8bb81b175c774a16b5312f/freight_forwarder/config.py#L1711-L1780
764
Synerty/peek-plugin-base
peek_plugin_base/storage/DbConnection.py
DbConnection.prefetchDeclarativeIds
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen: """ Prefetch Declarative IDs This function prefetches a chunk of IDs from a database sequence. Doing this allows us to preallocate the IDs before an insert, which significantly speeds up : * Orm inserts, especially those using inheritance * When we need the ID to assign it to a related object that we're also inserting. :param Declarative: The SQLAlchemy declarative class. (The class that inherits from DeclarativeBase) :param count: The number of IDs to prefetch :return: An iterable that dispenses the new IDs """ return _commonPrefetchDeclarativeIds( self.dbEngine, self._sequenceMutex, Declarative, count )
python
def prefetchDeclarativeIds(self, Declarative, count) -> DelcarativeIdGen: """ Prefetch Declarative IDs This function prefetches a chunk of IDs from a database sequence. Doing this allows us to preallocate the IDs before an insert, which significantly speeds up : * Orm inserts, especially those using inheritance * When we need the ID to assign it to a related object that we're also inserting. :param Declarative: The SQLAlchemy declarative class. (The class that inherits from DeclarativeBase) :param count: The number of IDs to prefetch :return: An iterable that dispenses the new IDs """ return _commonPrefetchDeclarativeIds( self.dbEngine, self._sequenceMutex, Declarative, count )
['def', 'prefetchDeclarativeIds', '(', 'self', ',', 'Declarative', ',', 'count', ')', '->', 'DelcarativeIdGen', ':', 'return', '_commonPrefetchDeclarativeIds', '(', 'self', '.', 'dbEngine', ',', 'self', '.', '_sequenceMutex', ',', 'Declarative', ',', 'count', ')']
Prefetch Declarative IDs This function prefetches a chunk of IDs from a database sequence. Doing this allows us to preallocate the IDs before an insert, which significantly speeds up : * Orm inserts, especially those using inheritance * When we need the ID to assign it to a related object that we're also inserting. :param Declarative: The SQLAlchemy declarative class. (The class that inherits from DeclarativeBase) :param count: The number of IDs to prefetch :return: An iterable that dispenses the new IDs
['Prefetch', 'Declarative', 'IDs']
train
https://github.com/Synerty/peek-plugin-base/blob/276101d028e1ee0678af514c761b74cce5a5cda9/peek_plugin_base/storage/DbConnection.py#L152-L171
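A sketch of how the prefetch pattern above is typically consumed; dbConn, MyRow, and session are hypothetical names used only for illustration:

# 'dbConn' is a hypothetical DbConnection instance, 'MyRow' a hypothetical
# declarative class, and 'session' an assumed SQLAlchemy session; only the
# prefetch pattern matters here.
items = ['a', 'b', 'c']
id_gen = dbConn.prefetchDeclarativeIds(MyRow, len(items))
# Draw all primary keys up front, then build the ORM rows without any
# per-row round trip to the database sequence.
rows = [MyRow(id=next(id_gen), name=item) for item in items]
session.add_all(rows)
session.commit()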
765
PeerAssets/pypeerassets
pypeerassets/provider/explorer.py
Explorer.getblockhash
def getblockhash(self, index: int) -> str: '''Returns the hash of the block at <index>; index 0 is the genesis block.''' return cast(str, self.api_fetch('getblockhash?index=' + str(index)))
python
def getblockhash(self, index: int) -> str: '''Returns the hash of the block at <index>; index 0 is the genesis block.''' return cast(str, self.api_fetch('getblockhash?index=' + str(index)))
['def', 'getblockhash', '(', 'self', ',', 'index', ':', 'int', ')', '->', 'str', ':', 'return', 'cast', '(', 'str', ',', 'self', '.', 'api_fetch', '(', "'getblockhash?index='", '+', 'str', '(', 'index', ')', ')', ')']
Returns the hash of the block at <index>; index 0 is the genesis block.
['Returns', 'the', 'hash', 'of', 'the', 'block', 'at', '<index>', ';', 'index', '0', 'is', 'the', 'genesis', 'block', '.']
train
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/explorer.py#L73-L76
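For example, fetching the genesis block hash through an Explorer provider might look like the following; the constructor arguments are an assumption about the provider setup, not taken from the record:

from pypeerassets.provider.explorer import Explorer

provider = Explorer(network='peercoin')  # constructor signature assumed
print(provider.getblockhash(0))          # index 0 is the genesis block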
766
rigetti/pyquil
pyquil/api/_quantum_computer.py
_canonicalize_name
def _canonicalize_name(prefix, qvm_type, noisy): """Take the output of _parse_name to create a canonical name. """ if noisy: noise_suffix = '-noisy' else: noise_suffix = '' if qvm_type is None: qvm_suffix = '' elif qvm_type == 'qvm': qvm_suffix = '-qvm' elif qvm_type == 'pyqvm': qvm_suffix = '-pyqvm' else: raise ValueError(f"Unknown qvm_type {qvm_type}") name = f'{prefix}{noise_suffix}{qvm_suffix}' return name
python
def _canonicalize_name(prefix, qvm_type, noisy): """Take the output of _parse_name to create a canonical name. """ if noisy: noise_suffix = '-noisy' else: noise_suffix = '' if qvm_type is None: qvm_suffix = '' elif qvm_type == 'qvm': qvm_suffix = '-qvm' elif qvm_type == 'pyqvm': qvm_suffix = '-pyqvm' else: raise ValueError(f"Unknown qvm_type {qvm_type}") name = f'{prefix}{noise_suffix}{qvm_suffix}' return name
['def', '_canonicalize_name', '(', 'prefix', ',', 'qvm_type', ',', 'noisy', ')', ':', 'if', 'noisy', ':', 'noise_suffix', '=', "'-noisy'", 'else', ':', 'noise_suffix', '=', "''", 'if', 'qvm_type', 'is', 'None', ':', 'qvm_suffix', '=', "''", 'elif', 'qvm_type', '==', "'qvm'", ':', 'qvm_suffix', '=', "'-qvm'", 'elif', 'qvm_type', '==', "'pyqvm'", ':', 'qvm_suffix', '=', "'-pyqvm'", 'else', ':', 'raise', 'ValueError', '(', 'f"Unknown qvm_type {qvm_type}"', ')', 'name', '=', "f'{prefix}{noise_suffix}{qvm_suffix}'", 'return', 'name']
Take the output of _parse_name to create a canonical name.
['Take', 'the', 'output', 'of', '_parse_name', 'to', 'create', 'a', 'canonical', 'name', '.']
train
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_quantum_computer.py#L344-L362
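Because the mapping is a pure string concatenation, a few concrete calls (with '9q-square' as an illustrative prefix, not taken from the record) pin down the behavior:

_canonicalize_name('9q-square', None, False)     # -> '9q-square'
_canonicalize_name('9q-square', 'qvm', False)    # -> '9q-square-qvm'
_canonicalize_name('9q-square', 'qvm', True)     # -> '9q-square-noisy-qvm'
_canonicalize_name('9q-square', 'pyqvm', False)  # -> '9q-square-pyqvm'
_canonicalize_name('9q-square', 'bogus', False)  # raises ValueError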
767
UCL-INGI/INGInious
inginious/frontend/user_manager.py
UserManager.course_unregister_user
def course_unregister_user(self, course, username=None): """ Unregister a user from the course :param course: a Course object :param username: The username of the user that we want to unregister. If None, uses self.session_username() """ if username is None: username = self.session_username() # Needed if user belongs to a group self._database.aggregations.find_one_and_update( {"courseid": course.get_id(), "groups.students": username}, {"$pull": {"groups.$.students": username, "students": username}}) # If user doesn't belong to a group, will ensure correct deletion self._database.aggregations.find_one_and_update( {"courseid": course.get_id(), "students": username}, {"$pull": {"students": username}}) self._logger.info("User %s unregistered from course %s", username, course.get_id())
python
def course_unregister_user(self, course, username=None): """ Unregister a user from the course :param course: a Course object :param username: The username of the user that we want to unregister. If None, uses self.session_username() """ if username is None: username = self.session_username() # Needed if user belongs to a group self._database.aggregations.find_one_and_update( {"courseid": course.get_id(), "groups.students": username}, {"$pull": {"groups.$.students": username, "students": username}}) # If user doesn't belong to a group, will ensure correct deletion self._database.aggregations.find_one_and_update( {"courseid": course.get_id(), "students": username}, {"$pull": {"students": username}}) self._logger.info("User %s unregistered from course %s", username, course.get_id())
['def', 'course_unregister_user', '(', 'self', ',', 'course', ',', 'username', '=', 'None', ')', ':', 'if', 'username', 'is', 'None', ':', 'username', '=', 'self', '.', 'session_username', '(', ')', '# Needed if user belongs to a group', 'self', '.', '_database', '.', 'aggregations', '.', 'find_one_and_update', '(', '{', '"courseid"', ':', 'course', '.', 'get_id', '(', ')', ',', '"groups.students"', ':', 'username', '}', ',', '{', '"$pull"', ':', '{', '"groups.$.students"', ':', 'username', ',', '"students"', ':', 'username', '}', '}', ')', "# If user doesn't belong to a group, will ensure correct deletion", 'self', '.', '_database', '.', 'aggregations', '.', 'find_one_and_update', '(', '{', '"courseid"', ':', 'course', '.', 'get_id', '(', ')', ',', '"students"', ':', 'username', '}', ',', '{', '"$pull"', ':', '{', '"students"', ':', 'username', '}', '}', ')', 'self', '.', '_logger', '.', 'info', '(', '"User %s unregistered from course %s"', ',', 'username', ',', 'course', '.', 'get_id', '(', ')', ')']
Unregister a user from the course :param course: a Course object :param username: The username of the user that we want to unregister. If None, uses self.session_username()
['Unregister', 'a', 'user', 'from', 'the', 'course', ':', 'param', 'course', ':', 'a', 'Course', 'object', ':', 'param', 'username', ':', 'The', 'username', 'of', 'the', 'user', 'that', 'we', 'want', 'to', 'unregister', '.', 'If', 'None', 'uses', 'self', '.', 'session_username', '()']
train
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/user_manager.py#L702-L721
768
oceanprotocol/squid-py
squid_py/ocean/ocean_conditions.py
OceanConditions.refund_reward
def refund_reward(self, agreement_id, amount, account): """ Refund reward condition. :param agreement_id: id of the agreement, hex str :param amount: Amount of tokens, int :param account: Account :return: """ return self.release_reward(agreement_id, amount, account)
python
def refund_reward(self, agreement_id, amount, account): """ Refund reward condition. :param agreement_id: id of the agreement, hex str :param amount: Amount of tokens, int :param account: Account :return: """ return self.release_reward(agreement_id, amount, account)
['def', 'refund_reward', '(', 'self', ',', 'agreement_id', ',', 'amount', ',', 'account', ')', ':', 'return', 'self', '.', 'release_reward', '(', 'agreement_id', ',', 'amount', ',', 'account', ')']
Refund reward condition. :param agreement_id: id of the agreement, hex str :param amount: Amount of tokens, int :param account: Account :return:
['Refund', 'reward', 'condition', '.']
train
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ocean/ocean_conditions.py#L65-L74
769
google/grr
grr/server/grr_response_server/databases/mysql_users.py
MySQLDBUsersMixin.DeleteGRRUser
def DeleteGRRUser(self, username, cursor=None): """Deletes the user and all related metadata with the given username.""" cursor.execute("DELETE FROM grr_users WHERE username_hash = %s", (mysql_utils.Hash(username),)) if cursor.rowcount == 0: raise db.UnknownGRRUserError(username)
python
def DeleteGRRUser(self, username, cursor=None): """Deletes the user and all related metadata with the given username.""" cursor.execute("DELETE FROM grr_users WHERE username_hash = %s", (mysql_utils.Hash(username),)) if cursor.rowcount == 0: raise db.UnknownGRRUserError(username)
['def', 'DeleteGRRUser', '(', 'self', ',', 'username', ',', 'cursor', '=', 'None', ')', ':', 'cursor', '.', 'execute', '(', '"DELETE FROM grr_users WHERE username_hash = %s"', ',', '(', 'mysql_utils', '.', 'Hash', '(', 'username', ')', ',', ')', ')', 'if', 'cursor', '.', 'rowcount', '==', '0', ':', 'raise', 'db', '.', 'UnknownGRRUserError', '(', 'username', ')']
Deletes the user and all related metadata with the given username.
['Deletes', 'the', 'user', 'and', 'all', 'related', 'metadata', 'with', 'the', 'given', 'username', '.']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_users.py#L137-L143
770
mattloper/opendr
opendr/topology.py
get_vert_connectivity
def get_vert_connectivity(mesh_v, mesh_f): """Returns a sparse matrix (of size #verts x #verts) where each nonzero element indicates a neighborhood relation. For example, if there is a nonzero element in position (15,12), that means vertex 15 is connected by an edge to vertex 12.""" vpv = sp.csc_matrix((len(mesh_v),len(mesh_v))) # for each column in the faces... for i in range(3): IS = mesh_f[:,i] JS = mesh_f[:,(i+1)%3] data = np.ones(len(IS)) ij = np.vstack((row(IS.flatten()), row(JS.flatten()))) mtx = sp.csc_matrix((data, ij), shape=vpv.shape) vpv = vpv + mtx + mtx.T return vpv
python
def get_vert_connectivity(mesh_v, mesh_f): """Returns a sparse matrix (of size #verts x #verts) where each nonzero element indicates a neighborhood relation. For example, if there is a nonzero element in position (15,12), that means vertex 15 is connected by an edge to vertex 12.""" vpv = sp.csc_matrix((len(mesh_v),len(mesh_v))) # for each column in the faces... for i in range(3): IS = mesh_f[:,i] JS = mesh_f[:,(i+1)%3] data = np.ones(len(IS)) ij = np.vstack((row(IS.flatten()), row(JS.flatten()))) mtx = sp.csc_matrix((data, ij), shape=vpv.shape) vpv = vpv + mtx + mtx.T return vpv
['def', 'get_vert_connectivity', '(', 'mesh_v', ',', 'mesh_f', ')', ':', 'vpv', '=', 'sp', '.', 'csc_matrix', '(', '(', 'len', '(', 'mesh_v', ')', ',', 'len', '(', 'mesh_v', ')', ')', ')', '# for each column in the faces...', 'for', 'i', 'in', 'range', '(', '3', ')', ':', 'IS', '=', 'mesh_f', '[', ':', ',', 'i', ']', 'JS', '=', 'mesh_f', '[', ':', ',', '(', 'i', '+', '1', ')', '%', '3', ']', 'data', '=', 'np', '.', 'ones', '(', 'len', '(', 'IS', ')', ')', 'ij', '=', 'np', '.', 'vstack', '(', '(', 'row', '(', 'IS', '.', 'flatten', '(', ')', ')', ',', 'row', '(', 'JS', '.', 'flatten', '(', ')', ')', ')', ')', 'mtx', '=', 'sp', '.', 'csc_matrix', '(', '(', 'data', ',', 'ij', ')', ',', 'shape', '=', 'vpv', '.', 'shape', ')', 'vpv', '=', 'vpv', '+', 'mtx', '+', 'mtx', '.', 'T', 'return', 'vpv']
Returns a sparse matrix (of size #verts x #verts) where each nonzero element indicates a neighborhood relation. For example, if there is a nonzero element in position (15,12), that means vertex 15 is connected by an edge to vertex 12.
['Returns', 'a', 'sparse', 'matrix', '(', 'of', 'size', '#verts', 'x', '#verts', ')', 'where', 'each', 'nonzero', 'element', 'indicates', 'a', 'neighborhood', 'relation', '.', 'For', 'example', 'if', 'there', 'is', 'a', 'nonzero', 'element', 'in', 'position', '(', '15', '12', ')', 'that', 'means', 'vertex', '15', 'is', 'connected', 'by', 'an', 'edge', 'to', 'vertex', '12', '.']
train
https://github.com/mattloper/opendr/blob/bc16a6a51771d6e062d088ba5cede66649b7c7ec/opendr/topology.py#L18-L35
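A tiny worked example: for a single triangle over vertices 0, 1 and 2, the matrix has nonzero entries exactly on the three (symmetric) edges. The import path is taken from the record URL; availability of the package is assumed.

import numpy as np
from opendr.topology import get_vert_connectivity

mesh_v = np.zeros((3, 3))       # three dummy vertex positions
mesh_f = np.array([[0, 1, 2]])  # one triangular face
conn = get_vert_connectivity(mesh_v, mesh_f)
rows, cols = conn.nonzero()
print(sorted(zip(rows.tolist(), cols.tolist())))
# -> [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]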
771
HewlettPackard/python-hpOneView
hpOneView/resources/servers/server_profiles.py
ServerProfiles.get_new_profile_template
def get_new_profile_template(self): """ Retrieves the profile template for a given server profile. Returns: dict: Server profile template. """ uri = '{}/new-profile-template'.format(self.data["uri"]) return self._helper.do_get(uri)
python
def get_new_profile_template(self): """ Retrieves the profile template for a given server profile. Returns: dict: Server profile template. """ uri = '{}/new-profile-template'.format(self.data["uri"]) return self._helper.do_get(uri)
['def', 'get_new_profile_template', '(', 'self', ')', ':', 'uri', '=', "'{}/new-profile-template'", '.', 'format', '(', 'self', '.', 'data', '[', '"uri"', ']', ')', 'return', 'self', '.', '_helper', '.', 'do_get', '(', 'uri', ')']
Retrieves the profile template for a given server profile. Returns: dict: Server profile template.
['Retrieves', 'the', 'profile', 'template', 'for', 'a', 'given', 'server', 'profile', '.']
train
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/server_profiles.py#L323-L331
772
ewels/MultiQC
multiqc/modules/picard/ValidateSamFile.py
_parse_reports_by_type
def _parse_reports_by_type(self): """ Returns a data dictionary Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type. """ data = dict() for file_meta in self.find_log_files('picard/sam_file_validation', filehandles=True): sample = file_meta['s_name'] if sample in data: log.debug("Duplicate sample name found! Overwriting: {}".format(sample)) filehandle = file_meta['f'] first_line = filehandle.readline().rstrip() filehandle.seek(0) # Rewind reading of the file if 'No errors found' in first_line: sample_data = _parse_no_error_report() elif first_line.startswith('ERROR') or first_line.startswith('WARNING'): sample_data = _parse_verbose_report(filehandle) else: sample_data = _parse_summary_report(filehandle) data[sample] = sample_data return data
python
def _parse_reports_by_type(self): """ Returns a data dictionary Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type. """ data = dict() for file_meta in self.find_log_files('picard/sam_file_validation', filehandles=True): sample = file_meta['s_name'] if sample in data: log.debug("Duplicate sample name found! Overwriting: {}".format(sample)) filehandle = file_meta['f'] first_line = filehandle.readline().rstrip() filehandle.seek(0) # Rewind reading of the file if 'No errors found' in first_line: sample_data = _parse_no_error_report() elif first_line.startswith('ERROR') or first_line.startswith('WARNING'): sample_data = _parse_verbose_report(filehandle) else: sample_data = _parse_summary_report(filehandle) data[sample] = sample_data return data
['def', '_parse_reports_by_type', '(', 'self', ')', ':', 'data', '=', 'dict', '(', ')', 'for', 'file_meta', 'in', 'self', '.', 'find_log_files', '(', "'picard/sam_file_validation'", ',', 'filehandles', '=', 'True', ')', ':', 'sample', '=', 'file_meta', '[', "'s_name'", ']', 'if', 'sample', 'in', 'data', ':', 'log', '.', 'debug', '(', '"Duplicate sample name found! Overwriting: {}"', '.', 'format', '(', 'sample', ')', ')', 'filehandle', '=', 'file_meta', '[', "'f'", ']', 'first_line', '=', 'filehandle', '.', 'readline', '(', ')', '.', 'rstrip', '(', ')', 'filehandle', '.', 'seek', '(', '0', ')', '# Rewind reading of the file', 'if', "'No errors found'", 'in', 'first_line', ':', 'sample_data', '=', '_parse_no_error_report', '(', ')', 'elif', 'first_line', '.', 'startswith', '(', "'ERROR'", ')', 'or', 'first_line', '.', 'startswith', '(', "'WARNING'", ')', ':', 'sample_data', '=', '_parse_verbose_report', '(', 'filehandle', ')', 'else', ':', 'sample_data', '=', '_parse_summary_report', '(', 'filehandle', ')', 'data', '[', 'sample', ']', '=', 'sample_data', 'return', 'data']
Returns a data dictionary Goes through logs and parses them based on 'No errors found', VERBOSE or SUMMARY type.
['Returns', 'a', 'data', 'dictionary']
train
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/picard/ValidateSamFile.py#L110-L137
773
eqcorrscan/EQcorrscan
eqcorrscan/utils/catalog_to_dd.py
read_phase
def read_phase(ph_file): """ Read hypoDD phase files into Obspy catalog class. :type ph_file: str :param ph_file: Phase file to read event info from. :returns: Catalog of events from file. :rtype: :class:`obspy.core.event.Catalog` >>> from obspy.core.event.catalog import Catalog >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> catalog = read_phase(TEST_PATH + '/tunnel.phase') >>> isinstance(catalog, Catalog) True """ ph_catalog = Catalog() f = open(ph_file, 'r') # Topline of each event is marked by # in position 0 for line in f: if line[0] == '#': if 'event_text' not in locals(): event_text = {'header': line.rstrip(), 'picks': []} else: ph_catalog.append(_phase_to_event(event_text)) event_text = {'header': line.rstrip(), 'picks': []} else: event_text['picks'].append(line.rstrip()) ph_catalog.append(_phase_to_event(event_text)) return ph_catalog
python
def read_phase(ph_file): """ Read hypoDD phase files into Obspy catalog class. :type ph_file: str :param ph_file: Phase file to read event info from. :returns: Catalog of events from file. :rtype: :class:`obspy.core.event.Catalog` >>> from obspy.core.event.catalog import Catalog >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> catalog = read_phase(TEST_PATH + '/tunnel.phase') >>> isinstance(catalog, Catalog) True """ ph_catalog = Catalog() f = open(ph_file, 'r') # Topline of each event is marked by # in position 0 for line in f: if line[0] == '#': if 'event_text' not in locals(): event_text = {'header': line.rstrip(), 'picks': []} else: ph_catalog.append(_phase_to_event(event_text)) event_text = {'header': line.rstrip(), 'picks': []} else: event_text['picks'].append(line.rstrip()) ph_catalog.append(_phase_to_event(event_text)) return ph_catalog
['def', 'read_phase', '(', 'ph_file', ')', ':', 'ph_catalog', '=', 'Catalog', '(', ')', 'f', '=', 'open', '(', 'ph_file', ',', "'r'", ')', '# Topline of each event is marked by # in position 0', 'for', 'line', 'in', 'f', ':', 'if', 'line', '[', '0', ']', '==', "'#'", ':', 'if', "'event_text'", 'not', 'in', 'locals', '(', ')', ':', 'event_text', '=', '{', "'header'", ':', 'line', '.', 'rstrip', '(', ')', ',', "'picks'", ':', '[', ']', '}', 'else', ':', 'ph_catalog', '.', 'append', '(', '_phase_to_event', '(', 'event_text', ')', ')', 'event_text', '=', '{', "'header'", ':', 'line', '.', 'rstrip', '(', ')', ',', "'picks'", ':', '[', ']', '}', 'else', ':', 'event_text', '[', "'picks'", ']', '.', 'append', '(', 'line', '.', 'rstrip', '(', ')', ')', 'ph_catalog', '.', 'append', '(', '_phase_to_event', '(', 'event_text', ')', ')', 'return', 'ph_catalog']
Read hypoDD phase files into Obspy catalog class. :type ph_file: str :param ph_file: Phase file to read event info from. :returns: Catalog of events from file. :rtype: :class:`obspy.core.event.Catalog` >>> from obspy.core.event.catalog import Catalog >>> # Get the path to the test data >>> import eqcorrscan >>> import os >>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data' >>> catalog = read_phase(TEST_PATH + '/tunnel.phase') >>> isinstance(catalog, Catalog) True
['Read', 'hypoDD', 'phase', 'files', 'into', 'Obspy', 'catalog', 'class', '.']
train
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/catalog_to_dd.py#L633-L667
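The grouping logic in read_phase can be illustrated standalone: every line starting with '#' opens a new event block and the following lines are its picks. This sketch is not part of EQcorrscan and runs on placeholder data (it also assumes, as read_phase effectively does, that the file starts with a header line):

def split_phase_blocks(lines):
    # Mirror of the grouping loop in read_phase: '#' starts a new event.
    blocks = []
    for line in lines:
        if line.startswith('#'):
            blocks.append({'header': line.rstrip(), 'picks': []})
        else:
            blocks[-1]['picks'].append(line.rstrip())
    return blocks

demo = ['# header for event 1', 'STA1 0.52 1.0 P',
        '# header for event 2', 'STA2 0.61 1.0 P']
print(len(split_phase_blocks(demo)))  # -> 2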
774
saltstack/salt
salt/modules/boto_cloudwatch.py
delete_alarm
def delete_alarm(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cloudwatch alarm CLI example to delete an alarm:: salt myminion boto_cloudwatch.delete_alarm myalarm region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_alarms([name]) log.info('Deleted alarm %s', name) return True
python
def delete_alarm(name, region=None, key=None, keyid=None, profile=None): ''' Delete a cloudwatch alarm CLI example to delete an alarm:: salt myminion boto_cloudwatch.delete_alarm myalarm region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) conn.delete_alarms([name]) log.info('Deleted alarm %s', name) return True
['def', 'delete_alarm', '(', 'name', ',', 'region', '=', 'None', ',', 'key', '=', 'None', ',', 'keyid', '=', 'None', ',', 'profile', '=', 'None', ')', ':', 'conn', '=', '_get_conn', '(', 'region', '=', 'region', ',', 'key', '=', 'key', ',', 'keyid', '=', 'keyid', ',', 'profile', '=', 'profile', ')', 'conn', '.', 'delete_alarms', '(', '[', 'name', ']', ')', 'log', '.', 'info', '(', "'Deleted alarm %s'", ',', 'name', ')', 'return', 'True']
Delete a cloudwatch alarm CLI example to delete an alarm:: salt myminion boto_cloudwatch.delete_alarm myalarm region=us-east-1
['Delete', 'a', 'cloudwatch', 'alarm']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_cloudwatch.py#L299-L311
775
coldfix/udiskie
udiskie/async_.py
gather._subtask_error
def _subtask_error(self, idx, error): """Receive an error from a single subtask.""" self.set_exception(error) self.errbacks.clear()
python
def _subtask_error(self, idx, error): """Receive an error from a single subtask.""" self.set_exception(error) self.errbacks.clear()
['def', '_subtask_error', '(', 'self', ',', 'idx', ',', 'error', ')', ':', 'self', '.', 'set_exception', '(', 'error', ')', 'self', '.', 'errbacks', '.', 'clear', '(', ')']
Receive an error from a single subtask.
['Receive', 'an', 'error', 'from', 'a', 'single', 'subtask', '.']
train
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/async_.py#L141-L144
776
dcos/shakedown
shakedown/dcos/security.py
add_user
def add_user(uid, password, desc=None): """ Adds user to the DCOS Enterprise. If no description is provided the uid will be used for the description. :param uid: user id :type uid: str :param password: password :type password: str :param desc: description of user :type desc: str """ try: desc = uid if desc is None else desc user_object = {"description": desc, "password": password} acl_url = urljoin(_acl_url(), 'users/{}'.format(uid)) r = http.put(acl_url, json=user_object) assert r.status_code == 201 except DCOSHTTPException as e: # already exists if e.response.status_code != 409: raise
python
def add_user(uid, password, desc=None): """ Adds user to the DCOS Enterprise. If no description is provided the uid will be used for the description. :param uid: user id :type uid: str :param password: password :type password: str :param desc: description of user :type desc: str """ try: desc = uid if desc is None else desc user_object = {"description": desc, "password": password} acl_url = urljoin(_acl_url(), 'users/{}'.format(uid)) r = http.put(acl_url, json=user_object) assert r.status_code == 201 except DCOSHTTPException as e: # already exists if e.response.status_code != 409: raise
['def', 'add_user', '(', 'uid', ',', 'password', ',', 'desc', '=', 'None', ')', ':', 'try', ':', 'desc', '=', 'uid', 'if', 'desc', 'is', 'None', 'else', 'desc', 'user_object', '=', '{', '"description"', ':', 'desc', ',', '"password"', ':', 'password', '}', 'acl_url', '=', 'urljoin', '(', '_acl_url', '(', ')', ',', "'users/{}'", '.', 'format', '(', 'uid', ')', ')', 'r', '=', 'http', '.', 'put', '(', 'acl_url', ',', 'json', '=', 'user_object', ')', 'assert', 'r', '.', 'status_code', '==', '201', 'except', 'DCOSHTTPException', 'as', 'e', ':', '# already exists', 'if', 'e', '.', 'response', '.', 'status_code', '!=', '409', ':', 'raise']
Adds user to the DCOS Enterprise. If no description is provided the uid will be used for the description. :param uid: user id :type uid: str :param password: password :type password: str :param desc: description of user :type desc: str
['Adds', 'user', 'to', 'the', 'DCOS', 'Enterprise', '.', 'If', 'no', 'description', 'is', 'provided', 'the', 'uid', 'will', 'be', 'used', 'for', 'the', 'description', '.']
train
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/security.py#L17-L37
777
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
Handler._build_session
def _build_session(self, name, start_info, end_info): """Builds a session object.""" assert start_info is not None result = api_pb2.Session( name=name, start_time_secs=start_info.start_time_secs, model_uri=start_info.model_uri, metric_values=self._build_session_metric_values(name), monitor_url=start_info.monitor_url) if end_info is not None: result.status = end_info.status result.end_time_secs = end_info.end_time_secs return result
python
def _build_session(self, name, start_info, end_info): """Builds a session object.""" assert start_info is not None result = api_pb2.Session( name=name, start_time_secs=start_info.start_time_secs, model_uri=start_info.model_uri, metric_values=self._build_session_metric_values(name), monitor_url=start_info.monitor_url) if end_info is not None: result.status = end_info.status result.end_time_secs = end_info.end_time_secs return result
['def', '_build_session', '(', 'self', ',', 'name', ',', 'start_info', ',', 'end_info', ')', ':', 'assert', 'start_info', 'is', 'not', 'None', 'result', '=', 'api_pb2', '.', 'Session', '(', 'name', '=', 'name', ',', 'start_time_secs', '=', 'start_info', '.', 'start_time_secs', ',', 'model_uri', '=', 'start_info', '.', 'model_uri', ',', 'metric_values', '=', 'self', '.', '_build_session_metric_values', '(', 'name', ')', ',', 'monitor_url', '=', 'start_info', '.', 'monitor_url', ')', 'if', 'end_info', 'is', 'not', 'None', ':', 'result', '.', 'status', '=', 'end_info', '.', 'status', 'result', '.', 'end_time_secs', '=', 'end_info', '.', 'end_time_secs', 'return', 'result']
Builds a session object.
['Builds', 'a', 'session', 'object', '.']
train
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L132-L145
778
CenturyLinkCloud/clc-python-sdk
src/clc/APIv2/network.py
Network.Update
def Update(self,name,description=None,location=None): """Updates the attributes of a given Network via PUT. https://www.ctl.io/api-docs/v2/#networks-update-network { "name": "VLAN for Development Servers", "description": "Development Servers on 11.22.33.0/24" } Returns a 204 and no content """ if not location: location = clc.v2.Account.GetLocation(session=self.session) payload = {'name': name} payload['description'] = description if description else self.description r = clc.v2.API.Call('PUT','/v2-experimental/networks/%s/%s/%s' % (self.alias, location, self.id), payload, session=self.session) self.name = self.data['name'] = name if description: self.data['description'] = description
python
def Update(self,name,description=None,location=None): """Updates the attributes of a given Network via PUT. https://www.ctl.io/api-docs/v2/#networks-update-network { "name": "VLAN for Development Servers", "description": "Development Servers on 11.22.33.0/24" } Returns a 204 and no content """ if not location: location = clc.v2.Account.GetLocation(session=self.session) payload = {'name': name} payload['description'] = description if description else self.description r = clc.v2.API.Call('PUT','/v2-experimental/networks/%s/%s/%s' % (self.alias, location, self.id), payload, session=self.session) self.name = self.data['name'] = name if description: self.data['description'] = description
['def', 'Update', '(', 'self', ',', 'name', ',', 'description', '=', 'None', ',', 'location', '=', 'None', ')', ':', 'if', 'not', 'location', ':', 'location', '=', 'clc', '.', 'v2', '.', 'Account', '.', 'GetLocation', '(', 'session', '=', 'self', '.', 'session', ')', 'payload', '=', '{', "'name'", ':', 'name', '}', 'payload', '[', "'description'", ']', '=', 'description', 'if', 'description', 'else', 'self', '.', 'description', 'r', '=', 'clc', '.', 'v2', '.', 'API', '.', 'Call', '(', "'PUT'", ',', "'/v2-experimental/networks/%s/%s/%s'", '%', '(', 'self', '.', 'alias', ',', 'location', ',', 'self', '.', 'id', ')', ',', 'payload', ',', 'session', '=', 'self', '.', 'session', ')', 'self', '.', 'name', '=', 'self', '.', 'data', '[', "'name'", ']', '=', 'name', 'if', 'description', ':', 'self', '.', 'data', '[', "'description'", ']', '=', 'description']
Updates the attributes of a given Network via PUT. https://www.ctl.io/api-docs/v2/#networks-update-network { "name": "VLAN for Development Servers", "description": "Development Servers on 11.22.33.0/24" } Returns a 204 and no content
['Updates', 'the', 'attributes', 'of', 'a', 'given', 'Network', 'via', 'PUT', '.']
train
https://github.com/CenturyLinkCloud/clc-python-sdk/blob/f4dba40c627cb08dd4b7d0d277e8d67578010b05/src/clc/APIv2/network.py#L131-L152
779
mattlong/hermes
hermes/server.py
_listen
def _listen(sockets): """Main server loop. Listens for incoming events and dispatches them to appropriate chatroom""" while True: (i , o, e) = select.select(sockets.keys(),[],[],1) for socket in i: if isinstance(sockets[socket], Chatroom): data_len = sockets[socket].client.Process(1) if data_len is None or data_len == 0: raise Exception('Disconnected from server') #elif sockets[socket] == 'stdio': # msg = sys.stdin.readline().rstrip('\r\n') # logger.info('stdin: [%s]' % (msg,)) else: raise Exception("Unknown socket type: %s" % repr(sockets[socket]))
python
def _listen(sockets): """Main server loop. Listens for incoming events and dispatches them to appropriate chatroom""" while True: (i , o, e) = select.select(sockets.keys(),[],[],1) for socket in i: if isinstance(sockets[socket], Chatroom): data_len = sockets[socket].client.Process(1) if data_len is None or data_len == 0: raise Exception('Disconnected from server') #elif sockets[socket] == 'stdio': # msg = sys.stdin.readline().rstrip('\r\n') # logger.info('stdin: [%s]' % (msg,)) else: raise Exception("Unknown socket type: %s" % repr(sockets[socket]))
['def', '_listen', '(', 'sockets', ')', ':', 'while', 'True', ':', '(', 'i', ',', 'o', ',', 'e', ')', '=', 'select', '.', 'select', '(', 'sockets', '.', 'keys', '(', ')', ',', '[', ']', ',', '[', ']', ',', '1', ')', 'for', 'socket', 'in', 'i', ':', 'if', 'isinstance', '(', 'sockets', '[', 'socket', ']', ',', 'Chatroom', ')', ':', 'data_len', '=', 'sockets', '[', 'socket', ']', '.', 'client', '.', 'Process', '(', '1', ')', 'if', 'data_len', 'is', 'None', 'or', 'data_len', '==', '0', ':', 'raise', 'Exception', '(', "'Disconnected from server'", ')', "#elif sockets[socket] == 'stdio':", "# msg = sys.stdin.readline().rstrip('\\r\\n')", "# logger.info('stdin: [%s]' % (msg,))", 'else', ':', 'raise', 'Exception', '(', '"Unknown socket type: %s"', '%', 'repr', '(', 'sockets', '[', 'socket', ']', ')', ')']
Main server loop. Listens for incoming events and dispatches them to appropriate chatroom
['Main', 'server', 'loop', '.', 'Listens', 'for', 'incoming', 'events', 'and', 'dispatches', 'them', 'to', 'appropriate', 'chatroom']
train
https://github.com/mattlong/hermes/blob/63a5afcafe90ca99aeb44edeee9ed6f90baae431/hermes/server.py#L78-L91
780
insilichem/ommprotocol
ommprotocol/io.py
SerializedReporter.report
def report(self, simulation, state): """Generate a report. Parameters ---------- simulation : Simulation The Simulation to generate a report for state : State The current state of the simulation """ if not self._initialized: self._initialized = True self._steps[0] += self.interval positions = state.getPositions() # Serialize self._out.write(b''.join([b'\nSTARTOFCHUNK\n', pickle.dumps([self._steps[0], positions._value]), b'\nENDOFCHUNK\n'])) if hasattr(self._out, 'flush'): self._out.flush()
python
def report(self, simulation, state): """Generate a report. Parameters ---------- simulation : Simulation The Simulation to generate a report for state : State The current state of the simulation """ if not self._initialized: self._initialized = True self._steps[0] += self.interval positions = state.getPositions() # Serialize self._out.write(b''.join([b'\nSTARTOFCHUNK\n', pickle.dumps([self._steps[0], positions._value]), b'\nENDOFCHUNK\n'])) if hasattr(self._out, 'flush'): self._out.flush()
['def', 'report', '(', 'self', ',', 'simulation', ',', 'state', ')', ':', 'if', 'not', 'self', '.', '_initialized', ':', 'self', '.', '_initialized', '=', 'True', 'self', '.', '_steps', '[', '0', ']', '+=', 'self', '.', 'interval', 'positions', '=', 'state', '.', 'getPositions', '(', ')', '# Serialize', 'self', '.', '_out', '.', 'write', '(', "b''", '.', 'join', '(', '[', "b'\\nSTARTOFCHUNK\\n'", ',', 'pickle', '.', 'dumps', '(', '[', 'self', '.', '_steps', '[', '0', ']', ',', 'positions', '.', '_value', ']', ')', ',', "b'\\nENDOFCHUNK\\n'", ']', ')', ')', 'if', 'hasattr', '(', 'self', '.', '_out', ',', "'flush'", ')', ':', 'self', '.', '_out', '.', 'flush', '(', ')']
Generate a report. Parameters ---------- simulation : Simulation The Simulation to generate a report for state : State The current state of the simulation
['Generate', 'a', 'report', '.']
train
https://github.com/insilichem/ommprotocol/blob/7283fddba7203e5ac3542fdab41fc1279d3b444e/ommprotocol/io.py#L827-L848
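Since each record is framed by STARTOFCHUNK/ENDOFCHUNK markers, a matching reader is straightforward. This sketch is not part of ommprotocol; it simply inverts the framing shown above:

import pickle

def read_serialized_reports(path):
    # Yield (step, positions) tuples written by SerializedReporter.report.
    with open(path, 'rb') as fh:
        blob = fh.read()
    for chunk in blob.split(b'\nSTARTOFCHUNK\n'):
        if not chunk.strip():
            continue
        payload = chunk.split(b'\nENDOFCHUNK\n')[0]
        step, positions = pickle.loads(payload)
        yield step, positions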
781
timothyhahn/rui
rui/rui.py
World.remove_system
def remove_system(self, system): ''' Removes system from world and kills system ''' if system in self._systems: self._systems.remove(system) else: raise UnmanagedSystemError(system)
python
def remove_system(self, system): ''' Removes system from world and kills system ''' if system in self._systems: self._systems.remove(system) else: raise UnmanagedSystemError(system)
['def', 'remove_system', '(', 'self', ',', 'system', ')', ':', 'if', 'system', 'in', 'self', '.', '_systems', ':', 'self', '.', '_systems', '.', 'remove', '(', 'system', ')', 'else', ':', 'raise', 'UnmanagedSystemError', '(', 'system', ')']
Removes system from world and kills system
['Removes', 'system', 'from', 'world', 'and', 'kills', 'system']
train
https://github.com/timothyhahn/rui/blob/ac9f587fb486760d77332866c6e876f78a810f74/rui/rui.py#L99-L106
782
PmagPy/PmagPy
pmagpy/pmag.py
doeigs_s
def doeigs_s(tau, Vdirs): """ get elements of s from eigenvalues - note that this is very unstable Input: tau,V: tau is a list of eigenvalues in decreasing order: [t1,t2,t3] V is a list of the eigenvector directions [[V1_dec,V1_inc],[V2_dec,V2_inc],[V3_dec,V3_inc]] Output: The six tensor elements as a list: s=[x11,x22,x33,x12,x23,x13] """ t = np.zeros((3, 3,), 'f') # initialize the tau diagonal matrix V = [] for j in range(3): t[j][j] = tau[j] # diagonalize tau for k in range(3): V.append(dir2cart([Vdirs[k][0], Vdirs[k][1], 1.0])) V = np.transpose(V) tmp = np.dot(V, t) chi = np.dot(tmp, np.transpose(V)) return a2s(chi)
python
def doeigs_s(tau, Vdirs): """ get elements of s from eigenvalues - note that this is very unstable Input: tau,V: tau is a list of eigenvalues in decreasing order: [t1,t2,t3] V is a list of the eigenvector directions [[V1_dec,V1_inc],[V2_dec,V2_inc],[V3_dec,V3_inc]] Output: The six tensor elements as a list: s=[x11,x22,x33,x12,x23,x13] """ t = np.zeros((3, 3,), 'f') # initialize the tau diagonal matrix V = [] for j in range(3): t[j][j] = tau[j] # diagonalize tau for k in range(3): V.append(dir2cart([Vdirs[k][0], Vdirs[k][1], 1.0])) V = np.transpose(V) tmp = np.dot(V, t) chi = np.dot(tmp, np.transpose(V)) return a2s(chi)
['def', 'doeigs_s', '(', 'tau', ',', 'Vdirs', ')', ':', 't', '=', 'np', '.', 'zeros', '(', '(', '3', ',', '3', ',', ')', ',', "'f'", ')', '# initialize the tau diagonal matrix', 'V', '=', '[', ']', 'for', 'j', 'in', 'range', '(', '3', ')', ':', 't', '[', 'j', ']', '[', 'j', ']', '=', 'tau', '[', 'j', ']', '# diagonalize tau', 'for', 'k', 'in', 'range', '(', '3', ')', ':', 'V', '.', 'append', '(', 'dir2cart', '(', '[', 'Vdirs', '[', 'k', ']', '[', '0', ']', ',', 'Vdirs', '[', 'k', ']', '[', '1', ']', ',', '1.0', ']', ')', ')', 'V', '=', 'np', '.', 'transpose', '(', 'V', ')', 'tmp', '=', 'np', '.', 'dot', '(', 'V', ',', 't', ')', 'chi', '=', 'np', '.', 'dot', '(', 'tmp', ',', 'np', '.', 'transpose', '(', 'V', ')', ')', 'return', 'a2s', '(', 'chi', ')']
get elements of s from eigenvalues - note that this is very unstable Input: tau,V: tau is a list of eigenvalues in decreasing order: [t1,t2,t3] V is a list of the eigenvector directions [[V1_dec,V1_inc],[V2_dec,V2_inc],[V3_dec,V3_inc]] Output: The six tensor elements as a list: s=[x11,x22,x33,x12,x23,x13]
['get', 'elements', 'of', 's', 'from', 'eigenvalues', '-', 'note', 'that', 'this', 'is', 'very', 'unstable', 'Input', ':', 'tau', 'V', ':', 'tau', 'is', 'a', 'list', 'of', 'eigenvalues', 'in', 'decreasing', 'order', ':', '[', 't1', 't2', 't3', ']', 'V', 'is', 'a', 'list', 'of', 'the', 'eigenvector', 'directions', '[[', 'V1_dec', 'V1_inc', ']', '[', 'V2_dec', 'V2_inc', ']', '[', 'V3_dec', 'V3_inc', ']]', 'Output', ':', 'The', 'six', 'tensor', 'elements', 'as', 'a', 'list', ':', 's', '=', '[', 'x11', 'x22', 'x33', 'x12', 'x23', 'x13', ']']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L5677-L5700
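In math terms, doeigs_s performs plain spectral synthesis: with tau the eigenvalues and v_k the unit Cartesian vector of the k-th eigenvector direction (via dir2cart), it rebuilds

\chi \;=\; V \,\mathrm{diag}(\tau_1, \tau_2, \tau_3)\, V^{\mathsf{T}},
\qquad
V = \begin{pmatrix} \mathbf{v}_1 & \mathbf{v}_2 & \mathbf{v}_3 \end{pmatrix},

and then reads the six tensor elements s = [x11, x22, x33, x12, x23, x13] off the symmetric matrix chi via a2s.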
783
BYU-PCCL/holodeck
holodeck/environments.py
HolodeckEnvironment.set_day_time
def set_day_time(self, hour): """Queue up a change day time command. It will be applied when `tick` or `step` is called next. By the next tick, the lighting and the skysphere will be updated with the new hour. If there is no skysphere or directional light in the world, the command will not function properly but will not cause a crash. Args: hour (int): The hour in military time, between 0 and 23 inclusive. """ self._should_write_to_command_buffer = True command_to_send = DayTimeCommand(hour % 24) self._commands.add_command(command_to_send)
python
def set_day_time(self, hour): """Queue up a change day time command. It will be applied when `tick` or `step` is called next. By the next tick, the lighting and the skysphere will be updated with the new hour. If there is no skysphere or directional light in the world, the command will not function properly but will not cause a crash. Args: hour (int): The hour in military time, between 0 and 23 inclusive. """ self._should_write_to_command_buffer = True command_to_send = DayTimeCommand(hour % 24) self._commands.add_command(command_to_send)
['def', 'set_day_time', '(', 'self', ',', 'hour', ')', ':', 'self', '.', '_should_write_to_command_buffer', '=', 'True', 'command_to_send', '=', 'DayTimeCommand', '(', 'hour', '%', '24', ')', 'self', '.', '_commands', '.', 'add_command', '(', 'command_to_send', ')']
Queue up a change day time command. It will be applied when `tick` or `step` is called next. By the next tick, the lighting and the skysphere will be updated with the new hour. If there is no skysphere or directional light in the world, the command will not function properly but will not cause a crash. Args: hour (int): The hour in military time, between 0 and 23 inclusive.
['Queue', 'up', 'a', 'change', 'day', 'time', 'command', '.', 'It', 'will', 'be', 'applied', 'when', 'tick', 'or', 'step', 'is', 'called', 'next', '.', 'By', 'the', 'next', 'tick', 'the', 'lighting', 'and', 'the', 'skysphere', 'will', 'be', 'updated', 'with', 'the', 'new', 'hour', '.', 'If', 'there', 'is', 'no', 'skysphere', 'or', 'directional', 'light', 'in', 'the', 'world', 'the', 'command', 'will', 'not', 'function', 'properly', 'but', 'will', 'not', 'cause', 'a', 'crash', '.']
train
https://github.com/BYU-PCCL/holodeck/blob/01acd4013f5acbd9f61fbc9caaafe19975e8b121/holodeck/environments.py#L291-L301
784
spacetelescope/drizzlepac
drizzlepac/buildmask.py
buildMask
def buildMask(dqarr, bitvalue): """ Builds a bit-mask from an input DQ array and a bitvalue flag """ return bitfield_to_boolean_mask(dqarr, bitvalue, good_mask_value=1, dtype=np.uint8)
python
def buildMask(dqarr, bitvalue): """ Builds a bit-mask from an input DQ array and a bitvalue flag """ return bitfield_to_boolean_mask(dqarr, bitvalue, good_mask_value=1, dtype=np.uint8)
['def', 'buildMask', '(', 'dqarr', ',', 'bitvalue', ')', ':', 'return', 'bitfield_to_boolean_mask', '(', 'dqarr', ',', 'bitvalue', ',', 'good_mask_value', '=', '1', ',', 'dtype', '=', 'np', '.', 'uint8', ')']
Builds a bit-mask from an input DQ array and a bitvalue flag
['Builds', 'a', 'bit', '-', 'mask', 'from', 'an', 'input', 'DQ', 'array', 'and', 'a', 'bitvalue', 'flag']
train
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/buildmask.py#L82-L85
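The good/bad semantics can be sketched with plain NumPy; this is an illustrative equivalent under the usual "ignore these flags" reading of bitvalue, not the bitfield_to_boolean_mask implementation itself:

import numpy as np

def simple_build_mask(dqarr, bitvalue):
    # A pixel is good (1) when every set DQ bit is covered by bitvalue,
    # and bad (0) as soon as any other flag is set.
    ignored = np.asarray(bitvalue).astype(dqarr.dtype)
    return ((dqarr & ~ignored) == 0).astype(np.uint8)

dq = np.array([0, 4, 4096, 4100], dtype=np.int32)
print(simple_build_mask(dq, 4096))  # -> [1 0 1 0]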
785
apple/turicreate
src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py
NearestNeighborClassifier._load_version
def _load_version(cls, state, version): """ A function to load a previously saved NearestNeighborClassifier model. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer. """ assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION) knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model']) del state['knn_model'] state['_target_type'] = eval(state['_target_type']) return cls(knn_model, state)
python
def _load_version(cls, state, version): """ A function to load a previously saved NearestNeighborClassifier model. Parameters ---------- unpickler : GLUnpickler A GLUnpickler file handler. version : int Version number maintained by the class writer. """ assert(version == cls._PYTHON_NN_CLASSIFIER_MODEL_VERSION) knn_model = _tc.nearest_neighbors.NearestNeighborsModel(state['knn_model']) del state['knn_model'] state['_target_type'] = eval(state['_target_type']) return cls(knn_model, state)
['def', '_load_version', '(', 'cls', ',', 'state', ',', 'version', ')', ':', 'assert', '(', 'version', '==', 'cls', '.', '_PYTHON_NN_CLASSIFIER_MODEL_VERSION', ')', 'knn_model', '=', '_tc', '.', 'nearest_neighbors', '.', 'NearestNeighborsModel', '(', 'state', '[', "'knn_model'", ']', ')', 'del', 'state', '[', "'knn_model'", ']', 'state', '[', "'_target_type'", ']', '=', 'eval', '(', 'state', '[', "'_target_type'", ']', ')', 'return', 'cls', '(', 'knn_model', ',', 'state', ')']
A function to load a previously saved NearestNeighborClassifier model. Parameters ---------- state : dict The serialized model state, as produced when the model was saved. version : int Version number maintained by the class writer.
['A', 'function', 'to', 'load', 'a', 'previously', 'saved', 'NearestNeighborClassifier', 'model', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/nearest_neighbor_classifier.py#L353-L369
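One detail worth calling out in the record above: `state['_target_type']` round-trips a Python type through its name string and `eval`. A toy sketch of that mechanism (illustration only, not the turicreate serializer itself):

    saved = str.__name__       # "str" — how a type can be stored as text
    target_type = eval(saved)  # back to the built-in type object
    assert target_type(42) == "42"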
786
Valuehorizon/valuehorizon-people
people/models.py
Person.age
def age(self, as_at_date=None): """ Compute the person's age """ if self.date_of_death != None or self.is_deceased == True: return None as_at_date = date.today() if as_at_date == None else as_at_date if self.date_of_birth != None: if (as_at_date.month, as_at_date.day) >= (self.date_of_birth.month, self.date_of_birth.day): return (as_at_date.year - self.date_of_birth.year) else: return ((as_at_date.year - self.date_of_birth.year) -1) else: return None
python
def age(self, as_at_date=None): """ Compute the person's age """ if self.date_of_death != None or self.is_deceased == True: return None as_at_date = date.today() if as_at_date == None else as_at_date if self.date_of_birth != None: if (as_at_date.month, as_at_date.day) >= (self.date_of_birth.month, self.date_of_birth.day): return (as_at_date.year - self.date_of_birth.year) else: return ((as_at_date.year - self.date_of_birth.year) -1) else: return None
['def', 'age', '(', 'self', ',', 'as_at_date', '=', 'None', ')', ':', 'if', 'self', '.', 'date_of_death', '!=', 'None', 'or', 'self', '.', 'is_deceased', '==', 'True', ':', 'return', 'None', 'as_at_date', '=', 'date', '.', 'today', '(', ')', 'if', 'as_at_date', '==', 'None', 'else', 'as_at_date', 'if', 'self', '.', 'date_of_birth', '!=', 'None', ':', 'if', '(', 'as_at_date', '.', 'month', ',', 'as_at_date', '.', 'day', ')', '>=', '(', 'self', '.', 'date_of_birth', '.', 'month', ',', 'self', '.', 'date_of_birth', '.', 'day', ')', ':', 'return', '(', 'as_at_date', '.', 'year', '-', 'self', '.', 'date_of_birth', '.', 'year', ')', 'else', ':', 'return', '(', '(', 'as_at_date', '.', 'year', '-', 'self', '.', 'date_of_birth', '.', 'year', ')', '-', '1', ')', 'else', ':', 'return', 'None']
Compute the person's age
['Compute', 'the', 'person', 's', 'age']
train
https://github.com/Valuehorizon/valuehorizon-people/blob/f32d9f1349c1a9384bae5ea61d10c1b1e0318401/people/models.py#L53-L68
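The tuple comparison in the corrected condition above is worth seeing in isolation: the earlier per-field check (`month >= ... and day >= ...`) undercounted whenever the birthday month had passed but the day-of-month had not. A self-contained sketch with stand-in dates:

    from datetime import date

    dob = date(1990, 3, 15)
    today = date(2024, 4, 1)   # the March birthday has already passed

    # (4, 1) >= (3, 15) compares lexicographically and is True, while the
    # old per-field check failed on day 1 >= 15 and dropped a year.
    had_birthday = (today.month, today.day) >= (dob.month, dob.day)
    print(today.year - dob.year - (0 if had_birthday else 1))  # 34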
787
ga4gh/ga4gh-server
ga4gh/server/datamodel/obo_parser.py
GODag.write_hier_all
def write_hier_all(self, out=sys.stdout, len_dash=1, max_depth=None, num_child=None, short_prt=False): """Write hierarchy for all GO Terms in obo file.""" # Print: [biological_process, molecular_function, and cellular_component] for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']: self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)
python
def write_hier_all(self, out=sys.stdout, len_dash=1, max_depth=None, num_child=None, short_prt=False): """Write hierarchy for all GO Terms in obo file.""" # Print: [biological_process, molecular_function, and cellular_component] for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']: self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)
['def', 'write_hier_all', '(', 'self', ',', 'out', '=', 'sys', '.', 'stdout', ',', 'len_dash', '=', '1', ',', 'max_depth', '=', 'None', ',', 'num_child', '=', 'None', ',', 'short_prt', '=', 'False', ')', ':', '# Print: [biological_process, molecular_function, and cellular_component]', 'for', 'go_id', 'in', '[', "'GO:0008150'", ',', "'GO:0003674'", ',', "'GO:0005575'", ']', ':', 'self', '.', 'write_hier', '(', 'go_id', ',', 'out', ',', 'len_dash', ',', 'max_depth', ',', 'num_child', ',', 'short_prt', ',', 'None', ')']
Write hierarchy for all GO Terms in obo file.
['Write', 'hierarchy', 'for', 'all', 'GO', 'Terms', 'in', 'obo', 'file', '.']
train
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/obo_parser.py#L492-L497
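A usage sketch for the record above (the constructor-from-OBO-path form follows the goatools parser this module is derived from, and the file name is a stand-in):

    dag = GODag("go-basic.obo")      # parse the ontology file
    dag.write_hier_all(max_depth=2)  # hierarchy under all three GO roots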
788
tanghaibao/jcvi
jcvi/utils/grouper.py
Grouper.joined
def joined(self, a, b): """ Returns True if a and b are members of the same set. """ mapping = self._mapping try: return mapping[a] is mapping[b] except KeyError: return False
python
def joined(self, a, b): """ Returns True if a and b are members of the same set. """ mapping = self._mapping try: return mapping[a] is mapping[b] except KeyError: return False
['def', 'joined', '(', 'self', ',', 'a', ',', 'b', ')', ':', 'mapping', '=', 'self', '.', '_mapping', 'try', ':', 'return', 'mapping', '[', 'a', ']', 'is', 'mapping', '[', 'b', ']', 'except', 'KeyError', ':', 'return', 'False']
Returns True if a and b are members of the same set.
['Returns', 'True', 'if', 'a', 'and', 'b', 'are', 'members', 'of', 'the', 'same', 'set', '.']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/grouper.py#L63-L71
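A short sketch of the disjoint-set behavior (`join` is assumed to be the companion method that merges sets, as in the classic Grouper recipe this class follows):

    g = Grouper()
    g.join("a", "b")            # a and b now share a set
    g.join("b", "c")            # c joins the same set through b
    print(g.joined("a", "c"))   # True
    print(g.joined("a", "z"))   # False — "z" was never added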
789
9b/google-alerts
google_alerts/__init__.py
GoogleAlerts.modify
def modify(self, monitor_id, options): """Modify an existing monitor using the passed configuration.""" if not self._state: raise InvalidState("State was not properly obtained from the app") monitors = self.list() # Get the latest set of monitors obj = None for monitor in monitors: if monitor_id != monitor['monitor_id']: continue obj = monitor if not obj: raise MonitorNotFound("No monitor was found with that term.") options['action'] = 'MODIFY' options.update(obj) payload = self._build_payload(obj['term'], options) url = self.ALERTS_MODIFY_URL.format(requestX=self._state[3]) self._log.debug("Modifying alert using: %s" % url) params = json.dumps(payload, separators=(',', ':')) data = {'params': params} response = self._session.post(url, data=data, headers=self.HEADERS) if response.status_code != 200: raise ActionError("Failed to modify monitor: %s" % response.content) return self.list()
python
def modify(self, monitor_id, options): """Modify an existing monitor using the passed configuration.""" if not self._state: raise InvalidState("State was not properly obtained from the app") monitors = self.list() # Get the latest set of monitors obj = None for monitor in monitors: if monitor_id != monitor['monitor_id']: continue obj = monitor if not obj: raise MonitorNotFound("No monitor was found with that term.") options['action'] = 'MODIFY' options.update(obj) payload = self._build_payload(obj['term'], options) url = self.ALERTS_MODIFY_URL.format(requestX=self._state[3]) self._log.debug("Modifying alert using: %s" % url) params = json.dumps(payload, separators=(',', ':')) data = {'params': params} response = self._session.post(url, data=data, headers=self.HEADERS) if response.status_code != 200: raise ActionError("Failed to modify monitor: %s" % response.content) return self.list()
['def', 'modify', '(', 'self', ',', 'monitor_id', ',', 'options', ')', ':', 'if', 'not', 'self', '.', '_state', ':', 'raise', 'InvalidState', '(', '"State was not properly obtained from the app"', ')', 'monitors', '=', 'self', '.', 'list', '(', ')', '# Get the latest set of monitors', 'obj', '=', 'None', 'for', 'monitor', 'in', 'monitors', ':', 'if', 'monitor_id', '!=', 'monitor', '[', "'monitor_id'", ']', ':', 'continue', 'obj', '=', 'monitor', 'if', 'not', 'obj', ':', 'raise', 'MonitorNotFound', '(', '"No monitor was found with that term."', ')', 'options', '[', "'action'", ']', '=', "'MODIFY'", 'options', '.', 'update', '(', 'obj', ')', 'payload', '=', 'self', '.', '_build_payload', '(', 'obj', '[', "'term'", ']', ',', 'options', ')', 'url', '=', 'self', '.', 'ALERTS_MODIFY_URL', '.', 'format', '(', 'requestX', '=', 'self', '.', '_state', '[', '3', ']', ')', 'self', '.', '_log', '.', 'debug', '(', '"Modifying alert using: %s"', '%', 'url', ')', 'params', '=', 'json', '.', 'dumps', '(', 'payload', ',', 'separators', '=', '(', "','", ',', "':'", ')', ')', 'data', '=', '{', "'params'", ':', 'params', '}', 'response', '=', 'self', '.', '_session', '.', 'post', '(', 'url', ',', 'data', '=', 'data', ',', 'headers', '=', 'self', '.', 'HEADERS', ')', 'if', 'response', '.', 'status_code', '!=', '200', ':', 'raise', 'ActionError', '(', '"Failed to modify monitor: %s"', '%', 'response', '.', 'content', ')', 'return', 'self', '.', 'list', '(', ')']
Modify an existing monitor using the passed configuration.
['Modify', 'an', 'existing', 'monitor', 'using', 'the', 'passed', 'configuration', '.']
train
https://github.com/9b/google-alerts/blob/69a502cbcccf1bcafe963ed40d286441b0117e04/google_alerts/__init__.py#L357-L380
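A hedged end-to-end sketch for the record above (the constructor and `authenticate` call follow the project README; the option key shown is an assumption):

    ga = GoogleAlerts("user@example.com", "hunter2")
    ga.authenticate()                      # populates the session state
    mid = ga.list()[0]["monitor_id"]       # pick an existing monitor
    ga.modify(mid, {"delivery": "RSS"})    # option key is an assumption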
790
nerdvegas/rez
src/rez/utils/filesystem.py
find_matching_symlink
def find_matching_symlink(path, source): """Find a symlink under `path` that points at `source`. If source is relative, it is considered relative to `path`. Returns: str: Name of symlink found, or None. """ def to_abs(target): if os.path.isabs(target): return target else: return os.path.normpath(os.path.join(path, target)) abs_source = to_abs(source) for name in os.listdir(path): linkpath = os.path.join(path, name) if os.path.islink(linkpath): source_ = os.readlink(linkpath) if to_abs(source_) == abs_source: return name return None
python
def find_matching_symlink(path, source): """Find a symlink under `path` that points at `source`. If source is relative, it is considered relative to `path`. Returns: str: Name of symlink found, or None. """ def to_abs(target): if os.path.isabs(target): return target else: return os.path.normpath(os.path.join(path, target)) abs_source = to_abs(source) for name in os.listdir(path): linkpath = os.path.join(path, name) if os.path.islink(linkpath): source_ = os.readlink(linkpath) if to_abs(source_) == abs_source: return name return None
['def', 'find_matching_symlink', '(', 'path', ',', 'source', ')', ':', 'def', 'to_abs', '(', 'target', ')', ':', 'if', 'os', '.', 'path', '.', 'isabs', '(', 'target', ')', ':', 'return', 'target', 'else', ':', 'return', 'os', '.', 'path', '.', 'normpath', '(', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'target', ')', ')', 'abs_source', '=', 'to_abs', '(', 'source', ')', 'for', 'name', 'in', 'os', '.', 'listdir', '(', 'path', ')', ':', 'linkpath', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'name', ')', 'if', 'os', '.', 'path', '.', 'islink', '(', 'linkpath', ')', ':', 'source_', '=', 'os', '.', 'readlink', '(', 'linkpath', ')', 'if', 'to_abs', '(', 'source_', ')', '==', 'abs_source', ':', 'return', 'name', 'return', 'None']
Find a symlink under `path` that points at `source`. If source is relative, it is considered relative to `path`. Returns: str: Name of symlink found, or None.
['Find', 'a', 'symlink', 'under', 'path', 'that', 'points', 'at', 'source', '.']
train
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L291-L314
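A self-contained check of the lookup above, which also shows why the `os.path.islink(linkpath)` call matters — without it, the loop would try to `readlink` regular files (POSIX-only sketch):

    import os, tempfile

    d = tempfile.mkdtemp()
    target = os.path.join(d, "payload.txt")
    open(target, "w").close()                            # a regular file
    os.symlink("payload.txt", os.path.join(d, "alias"))  # relative link

    print(find_matching_symlink(d, target))         # alias
    print(find_matching_symlink(d, "payload.txt"))  # alias (relative source)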
791
pysathq/pysat
pysat/solvers.py
Minisat22.get_model
def get_model(self): """ Get a model if the formula was previously satisfied. """ if self.minisat and self.status == True: model = pysolvers.minisat22_model(self.minisat) return model if model != None else []
python
def get_model(self): """ Get a model if the formula was previously satisfied. """ if self.minisat and self.status == True: model = pysolvers.minisat22_model(self.minisat) return model if model != None else []
['def', 'get_model', '(', 'self', ')', ':', 'if', 'self', '.', 'minisat', 'and', 'self', '.', 'status', '==', 'True', ':', 'model', '=', 'pysolvers', '.', 'minisat22_model', '(', 'self', '.', 'minisat', ')', 'return', 'model', 'if', 'model', '!=', 'None', 'else', '[', ']']
Get a model if the formula was previously satisfied.
['Get', 'a', 'model', 'if', 'the', 'formula', 'was', 'previously', 'satisfied', '.']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L3072-L3079
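A minimal round trip through the documented pysat interface:

    from pysat.solvers import Minisat22

    # x1 and (not x1 or x2) force both variables true.
    with Minisat22(bootstrap_with=[[1], [-1, 2]]) as m:
        if m.solve():
            print(m.get_model())   # [1, 2]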
792
RedHatInsights/insights-core
insights/core/ls_parser.py
parse_selinux
def parse_selinux(parts): """ Parse part of an ls output line that is selinux. Args: parts (list): A four-element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner, group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included. """ owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_type": selinux[2] if lsel > 2 else None, "se_mls": selinux[3] if lsel > 3 else None, "name": path } if link: result["link"] = link return result
python
def parse_selinux(parts): """ Parse part of an ls output line that is selinux. Args: parts (list): A four-element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner, group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included. """ owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_type": selinux[2] if lsel > 2 else None, "se_mls": selinux[3] if lsel > 3 else None, "name": path } if link: result["link"] = link return result
['def', 'parse_selinux', '(', 'parts', ')', ':', 'owner', ',', 'group', '=', 'parts', '[', ':', '2', ']', 'selinux', '=', 'parts', '[', '2', ']', '.', 'split', '(', '":"', ')', 'lsel', '=', 'len', '(', 'selinux', ')', 'path', ',', 'link', '=', 'parse_path', '(', 'parts', '[', '-', '1', ']', ')', 'result', '=', '{', '"owner"', ':', 'owner', ',', '"group"', ':', 'group', ',', '"se_user"', ':', 'selinux', '[', '0', ']', ',', '"se_role"', ':', 'selinux', '[', '1', ']', 'if', 'lsel', '>', '1', 'else', 'None', ',', '"se_type"', ':', 'selinux', '[', '2', ']', 'if', 'lsel', '>', '2', 'else', 'None', ',', '"se_mls"', ':', 'selinux', '[', '3', ']', 'if', 'lsel', '>', '3', 'else', 'None', ',', '"name"', ':', 'path', '}', 'if', 'link', ':', 'result', '[', '"link"', ']', '=', 'link', 'return', 'result']
Parse part of an ls output line that is selinux. Args: parts (list): A four-element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner, group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included.
['Parse', 'part', 'of', 'an', 'ls', 'output', 'line', 'that', 'is', 'selinux', '.']
train
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/ls_parser.py#L68-L98
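Expected input and output for the parser above, assuming `parse_path` returns the path plus a `None` link for a plain name (the sample values are stand-ins):

    parts = ["root", "root", "system_u:object_r:etc_t:s0", "/etc/passwd"]
    print(parse_selinux(parts))
    # {'owner': 'root', 'group': 'root', 'se_user': 'system_u',
    #  'se_role': 'object_r', 'se_type': 'etc_t', 'se_mls': 's0',
    #  'name': '/etc/passwd'}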
793
python-diamond/Diamond
src/collectors/smart/smart.py
SmartCollector.collect
def collect(self): """ Collect and publish S.M.A.R.T. attributes """ devices = re.compile(self.config['devices']) for device in os.listdir('/dev'): if devices.match(device): command = [self.config['bin'], "-A", os.path.join('/dev', device)] if str_to_bool(self.config['use_sudo']): command.insert(0, self.config['sudo_cmd']) attributes = subprocess.Popen( command, stdout=subprocess.PIPE ).communicate()[0].strip().splitlines() metrics = {} start_line = self.find_attr_start_line(attributes) for attr in attributes[start_line:]: attribute = attr.split() if attribute[1] != "Unknown_Attribute": metric = "%s.%s" % (device, attribute[1]) else: metric = "%s.%s" % (device, attribute[0]) # 234 Thermal_Throttle (...) 0/0 if '/' in attribute[9]: expanded = attribute[9].split('/') for i, subattribute in enumerate(expanded): submetric = '%s_%d' % (metric, i) if submetric not in metrics: metrics[submetric] = subattribute elif metrics[submetric] == 0 and subattribute > 0: metrics[submetric] = subattribute else: # New metric? Store it if metric not in metrics: metrics[metric] = attribute[9] # Duplicate metric? Only store if it has a larger value # This happens semi-often with the Temperature_Celsius # attribute You will have a PASS/FAIL after the real # temp, so only overwrite if The earlier one was a # PASS/FAIL (0/1) elif metrics[metric] == 0 and attribute[9] > 0: metrics[metric] = attribute[9] else: continue for metric in metrics.keys(): self.publish(metric, metrics[metric])
python
def collect(self): """ Collect and publish S.M.A.R.T. attributes """ devices = re.compile(self.config['devices']) for device in os.listdir('/dev'): if devices.match(device): command = [self.config['bin'], "-A", os.path.join('/dev', device)] if str_to_bool(self.config['use_sudo']): command.insert(0, self.config['sudo_cmd']) attributes = subprocess.Popen( command, stdout=subprocess.PIPE ).communicate()[0].strip().splitlines() metrics = {} start_line = self.find_attr_start_line(attributes) for attr in attributes[start_line:]: attribute = attr.split() if attribute[1] != "Unknown_Attribute": metric = "%s.%s" % (device, attribute[1]) else: metric = "%s.%s" % (device, attribute[0]) # 234 Thermal_Throttle (...) 0/0 if '/' in attribute[9]: expanded = attribute[9].split('/') for i, subattribute in enumerate(expanded): submetric = '%s_%d' % (metric, i) if submetric not in metrics: metrics[submetric] = subattribute elif metrics[submetric] == 0 and subattribute > 0: metrics[submetric] = subattribute else: # New metric? Store it if metric not in metrics: metrics[metric] = attribute[9] # Duplicate metric? Only store if it has a larger value # This happens semi-often with the Temperature_Celsius # attribute You will have a PASS/FAIL after the real # temp, so only overwrite if The earlier one was a # PASS/FAIL (0/1) elif metrics[metric] == 0 and attribute[9] > 0: metrics[metric] = attribute[9] else: continue for metric in metrics.keys(): self.publish(metric, metrics[metric])
['def', 'collect', '(', 'self', ')', ':', 'devices', '=', 're', '.', 'compile', '(', 'self', '.', 'config', '[', "'devices'", ']', ')', 'for', 'device', 'in', 'os', '.', 'listdir', '(', "'/dev'", ')', ':', 'if', 'devices', '.', 'match', '(', 'device', ')', ':', 'command', '=', '[', 'self', '.', 'config', '[', "'bin'", ']', ',', '"-A"', ',', 'os', '.', 'path', '.', 'join', '(', "'/dev'", ',', 'device', ')', ']', 'if', 'str_to_bool', '(', 'self', '.', 'config', '[', "'use_sudo'", ']', ')', ':', 'command', '.', 'insert', '(', '0', ',', 'self', '.', 'config', '[', "'sudo_cmd'", ']', ')', 'attributes', '=', 'subprocess', '.', 'Popen', '(', 'command', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ')', '.', 'communicate', '(', ')', '[', '0', ']', '.', 'strip', '(', ')', '.', 'splitlines', '(', ')', 'metrics', '=', '{', '}', 'start_line', '=', 'self', '.', 'find_attr_start_line', '(', 'attributes', ')', 'for', 'attr', 'in', 'attributes', '[', 'start_line', ':', ']', ':', 'attribute', '=', 'attr', '.', 'split', '(', ')', 'if', 'attribute', '[', '1', ']', '!=', '"Unknown_Attribute"', ':', 'metric', '=', '"%s.%s"', '%', '(', 'device', ',', 'attribute', '[', '1', ']', ')', 'else', ':', 'metric', '=', '"%s.%s"', '%', '(', 'device', ',', 'attribute', '[', '0', ']', ')', '# 234 Thermal_Throttle (...) 0/0', 'if', "'/'", 'in', 'attribute', '[', '9', ']', ':', 'expanded', '=', 'attribute', '[', '9', ']', '.', 'split', '(', "'/'", ')', 'for', 'i', ',', 'subattribute', 'in', 'enumerate', '(', 'expanded', ')', ':', 'submetric', '=', "'%s_%d'", '%', '(', 'metric', ',', 'i', ')', 'if', 'submetric', 'not', 'in', 'metrics', ':', 'metrics', '[', 'submetric', ']', '=', 'subattribute', 'elif', 'metrics', '[', 'submetric', ']', '==', '0', 'and', 'subattribute', '>', '0', ':', 'metrics', '[', 'submetric', ']', '=', 'subattribute', 'else', ':', '# New metric? Store it', 'if', 'metric', 'not', 'in', 'metrics', ':', 'metrics', '[', 'metric', ']', '=', 'attribute', '[', '9', ']', '# Duplicate metric? Only store if it has a larger value', '# This happens semi-often with the Temperature_Celsius', '# attribute You will have a PASS/FAIL after the real', '# temp, so only overwrite if The earlier one was a', '# PASS/FAIL (0/1)', 'elif', 'metrics', '[', 'metric', ']', '==', '0', 'and', 'attribute', '[', '9', ']', '>', '0', ':', 'metrics', '[', 'metric', ']', '=', 'attribute', '[', '9', ']', 'else', ':', 'continue', 'for', 'metric', 'in', 'metrics', '.', 'keys', '(', ')', ':', 'self', '.', 'publish', '(', 'metric', ',', 'metrics', '[', 'metric', ']', ')']
Collect and publish S.M.A.R.T. attributes
['Collect', 'and', 'publish', 'S', '.', 'M', '.', 'A', '.', 'R', '.', 'T', '.', 'attributes']
train
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/smart/smart.py#L45-L99
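How one attribute row in the collector above is reduced to a metric, using a stand-in line in the `smartctl -A` column layout (the device name and values are illustrative):

    line = "194 Temperature_Celsius 0x0022 036 053 000 Old_age Always - 36"
    attribute = line.split()
    metric = "%s.%s" % ("sda", attribute[1])  # sda.Temperature_Celsius
    value = attribute[9]                      # raw-value column -> "36"
    print(metric, value)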
794
azraq27/neural
neural/alignment.py
convert_coord
def convert_coord(coord_from,matrix_file,base_to_aligned=True): '''Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``''' with open(matrix_file) as f: try: values = [float(y) for y in ' '.join([x for x in f.readlines() if x.strip()[0]!='#']).strip().split()] except: nl.notify('Error reading values from matrix file %s' % matrix_file, level=nl.level.error) return False if len(values)!=12: nl.notify('Error: found %d values in matrix file %s (expecting 12)' % (len(values),matrix_file), level=nl.level.error) return False matrix = np.vstack((np.array(values).reshape((3,-1)),[0,0,0,1])) if not base_to_aligned: matrix = np.linalg.inv(matrix) return np.dot(matrix,list(coord_from) + [1])[:3]
python
def convert_coord(coord_from,matrix_file,base_to_aligned=True): '''Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``''' with open(matrix_file) as f: try: values = [float(y) for y in ' '.join([x for x in f.readlines() if x.strip()[0]!='#']).strip().split()] except: nl.notify('Error reading values from matrix file %s' % matrix_file, level=nl.level.error) return False if len(values)!=12: nl.notify('Error: found %d values in matrix file %s (expecting 12)' % (len(values),matrix_file), level=nl.level.error) return False matrix = np.vstack((np.array(values).reshape((3,-1)),[0,0,0,1])) if not base_to_aligned: matrix = np.linalg.inv(matrix) return np.dot(matrix,list(coord_from) + [1])[:3]
['def', 'convert_coord', '(', 'coord_from', ',', 'matrix_file', ',', 'base_to_aligned', '=', 'True', ')', ':', 'with', 'open', '(', 'matrix_file', ')', 'as', 'f', ':', 'try', ':', 'values', '=', '[', 'float', '(', 'y', ')', 'for', 'y', 'in', "' '", '.', 'join', '(', '[', 'x', 'for', 'x', 'in', 'f', '.', 'readlines', '(', ')', 'if', 'x', '.', 'strip', '(', ')', '[', '0', ']', '!=', "'#'", ']', ')', '.', 'strip', '(', ')', '.', 'split', '(', ')', ']', 'except', ':', 'nl', '.', 'notify', '(', "'Error reading values from matrix file %s'", '%', 'matrix_file', ',', 'level', '=', 'nl', '.', 'level', '.', 'error', ')', 'return', 'False', 'if', 'len', '(', 'values', ')', '!=', '12', ':', 'nl', '.', 'notify', '(', "'Error: found %d values in matrix file %s (expecting 12)'", '%', '(', 'len', '(', 'values', ')', ',', 'matrix_file', ')', ',', 'level', '=', 'nl', '.', 'level', '.', 'error', ')', 'return', 'False', 'matrix', '=', 'np', '.', 'vstack', '(', '(', 'np', '.', 'array', '(', 'values', ')', '.', 'reshape', '(', '(', '3', ',', '-', '1', ')', ')', ',', '[', '0', ',', '0', ',', '0', ',', '1', ']', ')', ')', 'if', 'not', 'base_to_aligned', ':', 'matrix', '=', 'np', '.', 'linalg', '.', 'inv', '(', 'matrix', ')', 'return', 'np', '.', 'dot', '(', 'matrix', ',', 'list', '(', 'coord_from', ')', '+', '[', '1', ']', ')', '[', ':', '3', ']']
Takes an XYZ array (in DICOM coordinates) and uses the matrix file produced by 3dAllineate to transform it. By default, the 3dAllineate matrix transforms from base to aligned space; to get the inverse transform set ``base_to_aligned`` to ``False``
['Takes', 'an', 'XYZ', 'array', '(', 'in', 'DICOM', 'coordinates', ')', 'and', 'uses', 'the', 'matrix', 'file', 'produced', 'by', '3dAllineate', 'to', 'transform', 'it', '.', 'By', 'default', 'the', '3dAllineate', 'matrix', 'transforms', 'from', 'base', 'to', 'aligned', 'space', ';', 'to', 'get', 'the', 'inverse', 'transform', 'set', 'base_to_aligned', 'to', 'False']
train
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/alignment.py#L120-L135
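The same affine bookkeeping in isolation: promote the 12 values to a 4x4 homogeneous matrix, optionally invert, and apply it to a homogeneous coordinate (the matrix values are stand-ins, not real 3dAllineate output):

    import numpy as np

    values = [1, 0, 0, 5,
              0, 1, 0, -2,
              0, 0, 1, 0]   # identity rotation plus a (5, -2, 0) shift
    matrix = np.vstack((np.array(values).reshape((3, -1)), [0, 0, 0, 1]))
    coord = [10.0, 20.0, 30.0]
    print(np.dot(matrix, coord + [1])[:3])                 # [15. 18. 30.]
    print(np.dot(np.linalg.inv(matrix), coord + [1])[:3])  # [ 5. 22. 30.]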
795
arista-eosplus/pyeapi
pyeapi/client.py
Config.read
def read(self, filename): """Reads the file specified by filename This method will load the eapi.conf file specified by filename into the instance object. It will also add the default connection localhost if it was not defined in the eapi.conf file Args: filename (str): The full path to the file to load """ try: SafeConfigParser.read(self, filename) except SafeConfigParserError as exc: # Ignore file and syslog a message on SafeConfigParser errors msg = ("%s: parsing error in eapi conf file: %s" % (type(exc).__name__, filename)) debug(msg) self._add_default_connection() for name in self.sections(): if name.startswith('connection:') and \ 'host' not in dict(self.items(name)): self.set(name, 'host', name.split(':')[1]) self.generate_tags()
python
def read(self, filename): """Reads the file specified by filename This method will load the eapi.conf file specified by filename into the instance object. It will also add the default connection localhost if it was not defined in the eapi.conf file Args: filename (str): The full path to the file to load """ try: SafeConfigParser.read(self, filename) except SafeConfigParserError as exc: # Ignore file and syslog a message on SafeConfigParser errors msg = ("%s: parsing error in eapi conf file: %s" % (type(exc).__name__, filename)) debug(msg) self._add_default_connection() for name in self.sections(): if name.startswith('connection:') and \ 'host' not in dict(self.items(name)): self.set(name, 'host', name.split(':')[1]) self.generate_tags()
['def', 'read', '(', 'self', ',', 'filename', ')', ':', 'try', ':', 'SafeConfigParser', '.', 'read', '(', 'self', ',', 'filename', ')', 'except', 'SafeConfigParserError', 'as', 'exc', ':', '# Ignore file and syslog a message on SafeConfigParser errors', 'msg', '=', '(', '"%s: parsing error in eapi conf file: %s"', '%', '(', 'type', '(', 'exc', ')', '.', '__name__', ',', 'filename', ')', ')', 'debug', '(', 'msg', ')', 'self', '.', '_add_default_connection', '(', ')', 'for', 'name', 'in', 'self', '.', 'sections', '(', ')', ':', 'if', 'name', '.', 'startswith', '(', "'connection:'", ')', 'and', "'host'", 'not', 'in', 'dict', '(', 'self', '.', 'items', '(', 'name', ')', ')', ':', 'self', '.', 'set', '(', 'name', ',', "'host'", ',', 'name', '.', 'split', '(', "':'", ')', '[', '1', ']', ')', 'self', '.', 'generate_tags', '(', ')']
Reads the file specified by filename This method will load the eapi.conf file specified by filename into the instance object. It will also add the default connection localhost if it was not defined in the eapi.conf file Args: filename (str): The full path to the file to load
['Reads', 'the', 'file', 'specified', 'by', 'filename']
train
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/client.py#L183-L209
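What the host-defaulting branch above means in practice: a `[connection:veos01]` section with no `host` entry resolves to `host = veos01`. A sketch using pyeapi's public entry points (the config contents are a stand-in):

    # ~/.eapi.conf, with no explicit host line:
    #   [connection:veos01]
    #   username: admin
    #   password: admin
    #   transport: https
    import pyeapi

    pyeapi.load_config("~/.eapi.conf")  # runs Config.read under the hood
    node = pyeapi.connect_to("veos01")  # host falls back to "veos01"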
796
SavinaRoja/OpenAccess_EPUB
src/openaccess_epub/utils/images.py
make_image_cache
def make_image_cache(img_cache): """ Initiates the image cache if it does not exist """ log.info('Initiating the image cache at {0}'.format(img_cache)) if not os.path.isdir(img_cache): utils.mkdir_p(img_cache) utils.mkdir_p(os.path.join(img_cache, '10.1371')) utils.mkdir_p(os.path.join(img_cache, '10.3389'))
python
def make_image_cache(img_cache): """ Initiates the image cache if it does not exist """ log.info('Initiating the image cache at {0}'.format(img_cache)) if not os.path.isdir(img_cache): utils.mkdir_p(img_cache) utils.mkdir_p(os.path.join(img_cache, '10.1371')) utils.mkdir_p(os.path.join(img_cache, '10.3389'))
['def', 'make_image_cache', '(', 'img_cache', ')', ':', 'log', '.', 'info', '(', "'Initiating the image cache at {0}'", '.', 'format', '(', 'img_cache', ')', ')', 'if', 'not', 'os', '.', 'path', '.', 'isdir', '(', 'img_cache', ')', ':', 'utils', '.', 'mkdir_p', '(', 'img_cache', ')', 'utils', '.', 'mkdir_p', '(', 'os', '.', 'path', '.', 'join', '(', 'img_cache', ',', "'10.1371'", ')', ')', 'utils', '.', 'mkdir_p', '(', 'os', '.', 'path', '.', 'join', '(', 'img_cache', ',', "'10.3389'", ')', ')']
Initiates the image cache if it does not exist
['Initiates', 'the', 'image', 'cache', 'if', 'it', 'does', 'not', 'exist']
train
https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/images.py#L171-L179
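For reference, the same idempotent directory creation needs only the Python 3 standard library, which is essentially what `utils.mkdir_p` provides (the path is a stand-in):

    import os

    img_cache = "/tmp/img_cache"  # stand-in path
    for sub in ("", "10.1371", "10.3389"):
        os.makedirs(os.path.join(img_cache, sub), exist_ok=True)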
797
googleapis/google-cloud-python
api_core/google/api_core/gapic_v1/method.py
wrap_method
def wrap_method( func, default_retry=None, default_timeout=None, client_info=client_info.DEFAULT_CLIENT_INFO, ): """Wrap an RPC method with common behavior. This applies common error wrapping, retry, and timeout behavior to a function. The wrapped function will take optional ``retry`` and ``timeout`` arguments. For example:: import google.api_core.gapic_v1.method from google.api_core import retry from google.api_core import timeout # The original RPC method. def get_topic(name, timeout=None): request = publisher_v2.GetTopicRequest(name=name) return publisher_stub.GetTopic(request, timeout=timeout) default_retry = retry.Retry(deadline=60) default_timeout = timeout.Timeout(deadline=60) wrapped_get_topic = google.api_core.gapic_v1.method.wrap_method( get_topic, default_retry) # Execute get_topic with default retry and timeout: response = wrapped_get_topic() # Execute get_topic without doing any retrying but with the default # timeout: response = wrapped_get_topic(retry=None) # Execute get_topic but only retry on 5xx errors: my_retry = retry.Retry(retry.if_exception_type( exceptions.InternalServerError)) response = wrapped_get_topic(retry=my_retry) The way this works is by late-wrapping the given function with the retry and timeout decorators. Essentially, when ``wrapped_get_topic()`` is called: * ``get_topic()`` is first wrapped with the ``timeout`` into ``get_topic_with_timeout``. * ``get_topic_with_timeout`` is wrapped with the ``retry`` into ``get_topic_with_timeout_and_retry()``. * The final ``get_topic_with_timeout_and_retry`` is called passing through the ``args`` and ``kwargs``. The callstack is therefore:: method.__call__() -> Retry.__call__() -> Timeout.__call__() -> wrap_errors() -> get_topic() Note that if ``timeout`` or ``retry`` is ``None``, then they are not applied to the function. For example, ``wrapped_get_topic(timeout=None, retry=None)`` is more or less equivalent to just calling ``get_topic`` but with error re-mapping. Args: func (Callable[Any]): The function to wrap. It should accept an optional ``timeout`` argument. If ``metadata`` is not ``None``, it should accept a ``metadata`` argument. default_retry (Optional[google.api_core.Retry]): The default retry strategy. If ``None``, the method will not retry by default. default_timeout (Optional[google.api_core.Timeout]): The default timeout strategy. Can also be specified as an int or float. If ``None``, the method will not have timeout specified by default. client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]): Client information used to create a user-agent string that's passed as gRPC metadata to the method. If unspecified, then a sane default will be used. If ``None``, then no user agent metadata will be provided to the RPC method. Returns: Callable: A new callable that takes optional ``retry`` and ``timeout`` arguments and applies the common error mapping, retry, timeout, and metadata behavior to the low-level RPC method. """ func = grpc_helpers.wrap_errors(func) if client_info is not None: user_agent_metadata = [client_info.to_grpc_metadata()] else: user_agent_metadata = None return general_helpers.wraps(func)( _GapicCallable( func, default_retry, default_timeout, metadata=user_agent_metadata ) )
python
def wrap_method( func, default_retry=None, default_timeout=None, client_info=client_info.DEFAULT_CLIENT_INFO, ): """Wrap an RPC method with common behavior. This applies common error wrapping, retry, and timeout behavior to a function. The wrapped function will take optional ``retry`` and ``timeout`` arguments. For example:: import google.api_core.gapic_v1.method from google.api_core import retry from google.api_core import timeout # The original RPC method. def get_topic(name, timeout=None): request = publisher_v2.GetTopicRequest(name=name) return publisher_stub.GetTopic(request, timeout=timeout) default_retry = retry.Retry(deadline=60) default_timeout = timeout.Timeout(deadline=60) wrapped_get_topic = google.api_core.gapic_v1.method.wrap_method( get_topic, default_retry) # Execute get_topic with default retry and timeout: response = wrapped_get_topic() # Execute get_topic without doing any retrying but with the default # timeout: response = wrapped_get_topic(retry=None) # Execute get_topic but only retry on 5xx errors: my_retry = retry.Retry(retry.if_exception_type( exceptions.InternalServerError)) response = wrapped_get_topic(retry=my_retry) The way this works is by late-wrapping the given function with the retry and timeout decorators. Essentially, when ``wrapped_get_topic()`` is called: * ``get_topic()`` is first wrapped with the ``timeout`` into ``get_topic_with_timeout``. * ``get_topic_with_timeout`` is wrapped with the ``retry`` into ``get_topic_with_timeout_and_retry()``. * The final ``get_topic_with_timeout_and_retry`` is called passing through the ``args`` and ``kwargs``. The callstack is therefore:: method.__call__() -> Retry.__call__() -> Timeout.__call__() -> wrap_errors() -> get_topic() Note that if ``timeout`` or ``retry`` is ``None``, then they are not applied to the function. For example, ``wrapped_get_topic(timeout=None, retry=None)`` is more or less equivalent to just calling ``get_topic`` but with error re-mapping. Args: func (Callable[Any]): The function to wrap. It should accept an optional ``timeout`` argument. If ``metadata`` is not ``None``, it should accept a ``metadata`` argument. default_retry (Optional[google.api_core.Retry]): The default retry strategy. If ``None``, the method will not retry by default. default_timeout (Optional[google.api_core.Timeout]): The default timeout strategy. Can also be specified as an int or float. If ``None``, the method will not have timeout specified by default. client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]): Client information used to create a user-agent string that's passed as gRPC metadata to the method. If unspecified, then a sane default will be used. If ``None``, then no user agent metadata will be provided to the RPC method. Returns: Callable: A new callable that takes optional ``retry`` and ``timeout`` arguments and applies the common error mapping, retry, timeout, and metadata behavior to the low-level RPC method. """ func = grpc_helpers.wrap_errors(func) if client_info is not None: user_agent_metadata = [client_info.to_grpc_metadata()] else: user_agent_metadata = None return general_helpers.wraps(func)( _GapicCallable( func, default_retry, default_timeout, metadata=user_agent_metadata ) )
['def', 'wrap_method', '(', 'func', ',', 'default_retry', '=', 'None', ',', 'default_timeout', '=', 'None', ',', 'client_info', '=', 'client_info', '.', 'DEFAULT_CLIENT_INFO', ',', ')', ':', 'func', '=', 'grpc_helpers', '.', 'wrap_errors', '(', 'func', ')', 'if', 'client_info', 'is', 'not', 'None', ':', 'user_agent_metadata', '=', '[', 'client_info', '.', 'to_grpc_metadata', '(', ')', ']', 'else', ':', 'user_agent_metadata', '=', 'None', 'return', 'general_helpers', '.', 'wraps', '(', 'func', ')', '(', '_GapicCallable', '(', 'func', ',', 'default_retry', ',', 'default_timeout', ',', 'metadata', '=', 'user_agent_metadata', ')', ')']
Wrap an RPC method with common behavior. This applies common error wrapping, retry, and timeout behavior to a function. The wrapped function will take optional ``retry`` and ``timeout`` arguments. For example:: import google.api_core.gapic_v1.method from google.api_core import retry from google.api_core import timeout # The original RPC method. def get_topic(name, timeout=None): request = publisher_v2.GetTopicRequest(name=name) return publisher_stub.GetTopic(request, timeout=timeout) default_retry = retry.Retry(deadline=60) default_timeout = timeout.Timeout(deadline=60) wrapped_get_topic = google.api_core.gapic_v1.method.wrap_method( get_topic, default_retry) # Execute get_topic with default retry and timeout: response = wrapped_get_topic() # Execute get_topic without doing any retrying but with the default # timeout: response = wrapped_get_topic(retry=None) # Execute get_topic but only retry on 5xx errors: my_retry = retry.Retry(retry.if_exception_type( exceptions.InternalServerError)) response = wrapped_get_topic(retry=my_retry) The way this works is by late-wrapping the given function with the retry and timeout decorators. Essentially, when ``wrapped_get_topic()`` is called: * ``get_topic()`` is first wrapped with the ``timeout`` into ``get_topic_with_timeout``. * ``get_topic_with_timeout`` is wrapped with the ``retry`` into ``get_topic_with_timeout_and_retry()``. * The final ``get_topic_with_timeout_and_retry`` is called passing through the ``args`` and ``kwargs``. The callstack is therefore:: method.__call__() -> Retry.__call__() -> Timeout.__call__() -> wrap_errors() -> get_topic() Note that if ``timeout`` or ``retry`` is ``None``, then they are not applied to the function. For example, ``wrapped_get_topic(timeout=None, retry=None)`` is more or less equivalent to just calling ``get_topic`` but with error re-mapping. Args: func (Callable[Any]): The function to wrap. It should accept an optional ``timeout`` argument. If ``metadata`` is not ``None``, it should accept a ``metadata`` argument. default_retry (Optional[google.api_core.Retry]): The default retry strategy. If ``None``, the method will not retry by default. default_timeout (Optional[google.api_core.Timeout]): The default timeout strategy. Can also be specified as an int or float. If ``None``, the method will not have timeout specified by default. client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]): Client information used to create a user-agent string that's passed as gRPC metadata to the method. If unspecified, then a sane default will be used. If ``None``, then no user agent metadata will be provided to the RPC method. Returns: Callable: A new callable that takes optional ``retry`` and ``timeout`` arguments and applies the common error mapping, retry, timeout, and metadata behavior to the low-level RPC method.
['Wrap', 'an', 'RPC', 'method', 'with', 'common', 'behavior', '.']
train
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/api_core/google/api_core/gapic_v1/method.py#L146-L242
798
d0c-s4vage/pfp
pfp/interp.py
Scope.clone
def clone(self): """Return a new Scope object that has the curr_scope pinned at the current one :returns: A new scope object """ self._dlog("cloning the stack") # TODO is this really necessary to create a brand new one? # I think it is... need to think about it more. # or... are we going to need ref counters and a global # scope object that allows a view into (or a snapshot of) # a specific scope stack? res = Scope(self._log) res._scope_stack = self._scope_stack res._curr_scope = self._curr_scope return res
python
def clone(self): """Return a new Scope object that has the curr_scope pinned at the current one :returns: A new scope object """ self._dlog("cloning the stack") # TODO is this really necessary to create a brand new one? # I think it is... need to think about it more. # or... are we going to need ref counters and a global # scope object that allows a view into (or a snapshot of) # a specific scope stack? res = Scope(self._log) res._scope_stack = self._scope_stack res._curr_scope = self._curr_scope return res
['def', 'clone', '(', 'self', ')', ':', 'self', '.', '_dlog', '(', '"cloning the stack"', ')', '# TODO is this really necessary to create a brand new one?', '# I think it is... need to think about it more.', '# or... are we going to need ref counters and a global', '# scope object that allows a view into (or a snapshot of)', '# a specific scope stack?', 'res', '=', 'Scope', '(', 'self', '.', '_log', ')', 'res', '.', '_scope_stack', '=', 'self', '.', '_scope_stack', 'res', '.', '_curr_scope', '=', 'self', '.', '_curr_scope', 'return', 'res']
Return a new Scope object that has the curr_scope pinned at the current one :returns: A new scope object
['Return', 'a', 'new', 'Scope', 'object', 'that', 'has', 'the', 'curr_scope', 'pinned', 'at', 'the', 'current', 'one', ':', 'returns', ':', 'A', 'new', 'scope', 'object']
train
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/interp.py#L295-L309
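Note that `clone` copies references, not contents: the new Scope shares `_scope_stack` with the original, which is exactly the trade-off the TODO comment above is weighing. In miniature:

    original_stack = [{"x": 1}]
    cloned_stack = original_stack   # what `res._scope_stack = ...` does
    original_stack.append({"y": 2})
    print(len(cloned_stack))        # 2 — the clone sees the push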
799
kensho-technologies/graphql-compiler
graphql_compiler/compiler/expressions.py
BinaryComposition.visit_and_update
def visit_and_update(self, visitor_fn): """Create an updated version (if needed) of BinaryComposition via the visitor pattern.""" new_left = self.left.visit_and_update(visitor_fn) new_right = self.right.visit_and_update(visitor_fn) if new_left is not self.left or new_right is not self.right: return visitor_fn(BinaryComposition(self.operator, new_left, new_right)) else: return visitor_fn(self)
python
def visit_and_update(self, visitor_fn): """Create an updated version (if needed) of BinaryComposition via the visitor pattern.""" new_left = self.left.visit_and_update(visitor_fn) new_right = self.right.visit_and_update(visitor_fn) if new_left is not self.left or new_right is not self.right: return visitor_fn(BinaryComposition(self.operator, new_left, new_right)) else: return visitor_fn(self)
['def', 'visit_and_update', '(', 'self', ',', 'visitor_fn', ')', ':', 'new_left', '=', 'self', '.', 'left', '.', 'visit_and_update', '(', 'visitor_fn', ')', 'new_right', '=', 'self', '.', 'right', '.', 'visit_and_update', '(', 'visitor_fn', ')', 'if', 'new_left', 'is', 'not', 'self', '.', 'left', 'or', 'new_right', 'is', 'not', 'self', '.', 'right', ':', 'return', 'visitor_fn', '(', 'BinaryComposition', '(', 'self', '.', 'operator', ',', 'new_left', ',', 'new_right', ')', ')', 'else', ':', 'return', 'visitor_fn', '(', 'self', ')']
Create an updated version (if needed) of BinaryComposition via the visitor pattern.
['Create', 'an', 'updated', 'version', '(', 'if', 'needed', ')', 'of', 'BinaryComposition', 'via', 'the', 'visitor', 'pattern', '.']
train
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/expressions.py#L781-L789
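The `is` checks in the record above implement a copy-on-write visitor: untouched subtrees come back as the very same objects, so an identity visitor rebuilds nothing. A toy version of the pattern:

    class Leaf(object):
        def visit_and_update(self, fn):
            return fn(self)

    class Pair(object):
        def __init__(self, left, right):
            self.left, self.right = left, right

        def visit_and_update(self, fn):
            new_left = self.left.visit_and_update(fn)
            new_right = self.right.visit_and_update(fn)
            if new_left is not self.left or new_right is not self.right:
                return fn(Pair(new_left, new_right))
            return fn(self)

    tree = Pair(Leaf(), Leaf())
    assert tree.visit_and_update(lambda node: node) is tree  # no rebuild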