Dataset schema (column, dtype, observed length range or class count):

    Unnamed: 0                  int64    0 to 10k
    repository_name             string   lengths 7 to 54
    func_path_in_repository     string   lengths 5 to 223
    func_name                   string   lengths 1 to 134
    whole_func_string           string   lengths 100 to 30.3k
    language                    string   1 distinct value
    func_code_string            string   lengths 100 to 30.3k
    func_code_tokens            string   lengths 138 to 33.2k
    func_documentation_string   string   lengths 1 to 15k
    func_documentation_tokens   string   lengths 5 to 5.14k
    split_name                  string   1 distinct value
    func_code_url               string   lengths 91 to 315
Row 1,000
repository: IntegralDefense/critsapi
path: critsapi/critsapi.py
function: CRITsAPI.forge_relationship
language: python

```python
def forge_relationship(self, left_id, left_type, right_id, right_type,
                       rel_type='Related To', rel_date=None,
                       rel_confidence='high', rel_reason=''):
    """Forges a relationship between two TLOs.

    Args:
        left_id: The CRITs ID of the first indicator
        left_type: The CRITs TLO type of the first indicator
        right_id: The CRITs ID of the second indicator
        right_type: The CRITs TLO type of the second indicator
        rel_type: The relationship type ("Related To", etc.)
        rel_date: datetime.datetime object for the date of the
            relationship. If left blank, it will be
            datetime.datetime.now()
        rel_confidence: The relationship confidence (high, medium, low)
        rel_reason: Reason for the relationship.

    Returns:
        True if the relationship was created. False otherwise.
    """
    if not rel_date:
        rel_date = datetime.datetime.now()
    type_trans = self._type_translation(left_type)
    submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id)
    params = {
        'api_key': self.api_key,
        'username': self.username,
    }
    data = {
        'action': 'forge_relationship',
        'right_type': right_type,
        'right_id': right_id,
        'rel_type': rel_type,
        'rel_date': rel_date,
        'rel_confidence': rel_confidence,
        'rel_reason': rel_reason
    }
    r = requests.patch(submit_url, params=params, data=data,
                       proxies=self.proxies, verify=self.verify)
    if r.status_code == 200:
        log.debug('Relationship built successfully: {0} <-> '
                  '{1}'.format(left_id, right_id))
        return True
    else:
        log.error('Error with status code {0} and message {1} between '
                  'these indicators: {2} <-> '
                  '{3}'.format(r.status_code, r.text, left_id, right_id))
        return False
```
split: train
source: https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L631-L680
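A hedged usage sketch for this record's function: the CRITsAPI constructor keywords, server URL, and object IDs below are illustrative assumptions, not taken from the record; only forge_relationship's signature comes from the code above.

```python
# Hypothetical usage; constructor kwargs and IDs are assumed for illustration.
from critsapi.critsapi import CRITsAPI

crits = CRITsAPI(api_url='https://crits.example.com/api/v1',
                 api_key='0123456789abcdef', username='analyst')
created = crits.forge_relationship('5a1b2c...', 'Indicator',
                                   '5a1d4e...', 'Sample',
                                   rel_type='Related To',
                                   rel_confidence='high',
                                   rel_reason='same campaign')
print('created' if created else 'failed')
```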
Row 1,001
repository: phoebe-project/phoebe2
path: phoebe/frontend/bundle.py
function: Bundle.get_constraint
language: python

```python
def get_constraint(self, twig=None, **kwargs):
    """
    Filter in the 'constraint' context

    :parameter str twig: twig/name of the constraint (optional)
    :parameter **kwargs: any other tags to do the filter
        (except twig or context)
    :return: :class:`phoebe.parameters.parameters.ParameterSet`
    """
    if twig is not None:
        kwargs['twig'] = twig
    kwargs['context'] = 'constraint'
    return self.get(**kwargs)
```
split: train
source: https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L2698-L2710
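A short usage sketch: get_constraint is a thin filter wrapper around Bundle.get. The default binary and the 'mass@primary' twig below are illustrative assumptions.

```python
# Usage sketch; bundle contents and twig are illustrative.
import phoebe

b = phoebe.default_binary()
# Equivalent to b.get(twig='mass@primary', context='constraint');
# returns a ParameterSet restricted to the constraint context.
ps = b.get_constraint(twig='mass@primary')
print(ps)
```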
Row 1,002
repository: jobovy/galpy
path: galpy/potential/FerrersPotential.py
function: FerrersPotential._Rzderiv
language: python

```python
def _Rzderiv(self, R, z, phi=0., t=0.):
    """
    NAME:
       _Rzderiv
    PURPOSE:
       evaluate the mixed radial, vertical derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the mixed radial, vertical derivative
    """
    if not self.isNonAxi:
        phi = 0.
    x, y, z = self._compute_xyz(R, phi, z, t)
    phixza = self._2ndderiv_xyz(x, y, z, 0, 2)
    phiyza = self._2ndderiv_xyz(x, y, z, 1, 2)
    ang = self._omegab * t + self._pa
    c, s = np.cos(ang), np.sin(ang)
    phixz = c * phixza + s * phiyza
    phiyz = -s * phixza + c * phiyza
    return np.cos(phi) * phixz + np.sin(phi) * phiyz
```
split: train
source: https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/FerrersPotential.py#L257-L280
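Read as math, the body of _Rzderiv is the chain rule plus a frame rotation: the mixed second derivatives computed in the bar frame (phixza, phiyza) are rotated to the inertial frame through the bar angle, then projected onto the cylindrical radial direction. This is a direct restatement of the code above, with alpha the bar angle:

```latex
% (x', y') are bar-frame axes, (x, y) inertial axes,
% \alpha = \Omega_b t + \mathrm{pa} is the bar angle used in the code.
\Phi_{xz} = \cos\alpha\,\Phi_{x'z} + \sin\alpha\,\Phi_{y'z},
\qquad
\Phi_{yz} = -\sin\alpha\,\Phi_{x'z} + \cos\alpha\,\Phi_{y'z},
\qquad
\frac{\partial^{2}\Phi}{\partial R\,\partial z}
  = \cos\phi\,\Phi_{xz} + \sin\phi\,\Phi_{yz}.
```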
Row 1,003
repository: fermiPy/fermipy
path: fermipy/jobs/job_archive.py
function: JobArchive.make_job_details
language: python

```python
def make_job_details(self, row_idx):
    """Create a `JobDetails` from an `astropy.table.row.Row`"""
    row = self._table[row_idx]
    job_details = JobDetails.create_from_row(row)
    job_details.get_file_paths(self._file_archive, self._table_id_array)
    self._cache[job_details.fullkey] = job_details
    return job_details
```
split: train
source: https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L536-L542
Row 1,004
repository: jmcgeheeiv/pyfakefs
path: pyfakefs/fake_filesystem.py
function: FakePathModule.dir
language: python

```python
def dir():
    """Return the list of patched function names. Used for patching
    functions imported from the module.
    """
    dir = [
        'abspath', 'dirname', 'exists', 'expanduser', 'getatime',
        'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile',
        'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath',
        'realpath', 'relpath', 'split', 'splitdrive'
    ]
    if IS_PY2:
        dir.append('walk')
    if sys.platform != 'win32' or not IS_PY2:
        dir.append('samefile')
    return dir
```
split: train
source: https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L3107-L3121
Row 1,005
repository: WebarchivCZ/WA-KAT
path: src/wa_kat/templates/static/js/Lib/site-packages/wa_kat_main.py
function: AlephISSNReaderAdapter.start
language: python

```python
def start(cls, ev=None):
    """
    Start the query to aleph by ISSN.
    """
    ViewController.log_view.add("Beginning AlephReader request..")
    ViewController.issnbox_error.reset()
    issn = ViewController.issn.strip()

    # make sure that `issn` was filled
    if not issn:
        ViewController.issnbox_error.show("ISSN nebylo vyplněno!")
        ViewController.log_view.add("No ISSN! Aborting.")
        return

    ViewController.issnbox_error.hide()
    ViewController.issn_progressbar.reset()
    ViewController.issn_progressbar.show(50)

    ViewController.log_view.add("For ISSN `%s`." % issn)

    make_request(
        url=join(settings.API_PATH, "aleph/records_by_issn"),
        data={'issn': issn},
        on_complete=cls.on_complete
    )
```
split: train
source: https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/wa_kat_main.py#L210-L234
Row 1,006
repository: gem/oq-engine
path: openquake/hazardlib/geo/geodetic.py
function: npoints_between
language: python

```python
def npoints_between(lon1, lat1, depth1, lon2, lat2, depth2, npoints):
    """
    Find a list of specified number of points between two given ones that
    are equally spaced along the great circle arc connecting given points.

    :param float lon1, lat1, depth1:
        Coordinates of a point to start from. The first point in a
        resulting list has these coordinates.
    :param float lon2, lat2, depth2:
        Coordinates of a point to finish at. The last point in a resulting
        list has these coordinates.
    :param npoints:
        Integer number of points to return. First and last points count,
        so if there have to be two intervals, ``npoints`` should be 3.
    :returns:
        Tuple of three 1d numpy arrays: longitudes, latitudes and depths
        of resulting points respectively.

    Finds distance between two reference points and calls
    :func:`npoints_towards`.
    """
    hdist = geodetic_distance(lon1, lat1, lon2, lat2)
    vdist = depth2 - depth1
    rlons, rlats, rdepths = npoints_towards(
        lon1, lat1, depth1, azimuth(lon1, lat1, lon2, lat2),
        hdist, vdist, npoints
    )
    # the last point should be left intact
    rlons[-1] = lon2
    rlats[-1] = lat2
    rdepths[-1] = depth2
    return rlons, rlats, rdepths
```
split: train
source: https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/geodetic.py#L305-L336
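A quick call sketch; the coordinates are arbitrary illustrative values, and the signature is exactly the one in the record above.

```python
# Three equally spaced points (endpoints included) along the arc;
# depths are interpolated between depth1 and depth2.
from openquake.hazardlib.geo.geodetic import npoints_between

lons, lats, depths = npoints_between(0.0, 0.0, 0.0,    # lon1, lat1, depth1
                                     10.0, 0.0, 10.0,  # lon2, lat2, depth2
                                     3)                # npoints
print(lons, lats, depths)  # last point is forced to exactly (10.0, 0.0, 10.0)
```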
Row 1,007
repository: mlperf/training
path: translation/tensorflow/transformer/model/transformer.py
function: Transformer.predict
language: python

```python
def predict(self, encoder_outputs, encoder_decoder_attention_bias):
    """Return predicted sequence."""
    batch_size = tf.shape(encoder_outputs)[0]
    input_length = tf.shape(encoder_outputs)[1]
    max_decode_length = input_length + self.params.extra_decode_length

    symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length)

    # Create initial set of IDs that will be passed into symbols_to_logits_fn.
    initial_ids = tf.zeros([batch_size], dtype=tf.int32)

    # Create cache storing decoder attention values for each layer.
    cache = {
        "layer_%d" % layer: {
            "k": tf.zeros([batch_size, 0, self.params.hidden_size]),
            "v": tf.zeros([batch_size, 0, self.params.hidden_size]),
        } for layer in range(self.params.num_hidden_layers)}

    # Add encoder output and attention bias to the cache.
    cache["encoder_outputs"] = encoder_outputs
    cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias

    # Use beam search to find the top beam_size sequences and scores.
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
        value={
            "vocab_size": self.params.vocab_size,
            "beam_size": self.params.beam_size,
            "alpha": self.params.alpha,
            "extra_decode_length": self.params.extra_decode_length})
    decoded_ids, scores = beam_search.sequence_beam_search(
        symbols_to_logits_fn=symbols_to_logits_fn,
        initial_ids=initial_ids,
        initial_cache=cache,
        vocab_size=self.params.vocab_size,
        beam_size=self.params.beam_size,
        alpha=self.params.alpha,
        max_decode_length=max_decode_length,
        eos_id=EOS_ID)

    # Get the top sequence for each batch element
    top_decoded_ids = decoded_ids[:, 0, 1:]
    top_scores = scores[:, 0]

    return {"outputs": top_decoded_ids, "scores": top_scores}
```
split: train
source: https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/model/transformer.py#L217-L261
Row 1,008
repository: lsst-sqre/documenteer
path: documenteer/sphinxext/jira.py
function: jira_role
language: python

```python
def jira_role(name, rawtext, text, lineno, inliner,
              options=None, content=None, oxford_comma=True):
    """Sphinx role for referencing a JIRA ticket.

    Examples::

        :jira:`DM-6181` -> DM-6181
        :jira:`DM-6180,DM-6181` -> DM-6180 and DM-6181
        :jira:`DM-6180,DM-6181,DM-6182` -> DM-6180, DM-6181, and DM-6182
    """
    options = options or {}
    content = content or []
    config = inliner.document.settings.env.app.config

    ticket_ids = [each.strip() for each in utils.unescape(text).split(',')]
    n_tickets = len(ticket_ids)

    if oxford_comma:
        sep_factory = _oxford_comma_separator
    else:
        sep_factory = _comma_separator

    node_list = []
    for i, ticket_id in enumerate(ticket_ids):
        node = _make_ticket_node(ticket_id, config, options=options)
        node_list.append(node)
        sep_text = sep_factory(i, n_tickets)
        if sep_text is not None:
            sep = nodes.raw(text=sep_text, format='html')
            node_list.append(sep)
    return node_list, []
```
split: train
source: https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/jira.py#L53-L83
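For context, a role function like this is wired into Sphinx through the standard add_role hook; the sketch below is an assumption about the wiring, not documenteer's actual setup code.

```python
# Hedged sketch of a Sphinx extension setup(); documenteer's real
# registration may differ.
def setup(app):
    app.add_role('jira', jira_role)
    return {'parallel_read_safe': True}
```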
Row 1,009
repository: Trax-air/swagger-parser
path: swagger_parser/swagger_parser.py
function: SwaggerParser.validate_additional_properties
language: python

```python
def validate_additional_properties(self, valid_response, response):
    """Validates additional properties. In additional properties, we only
    need to compare the values of the dict, not the keys.

    Args:
        valid_response: An example response (for example generated in
                        _get_example_from_properties(self, spec)).
                        Type is DICT
        response: The actual dict coming from the response.
                  Type is DICT

    Returns:
        A boolean - whether the actual response validates against the
        given example.
    """
    assert isinstance(valid_response, dict)
    assert isinstance(response, dict)

    # the type of the value of the first key/value in valid_response is
    # our expected type - if it is a dict or list, we must go deeper
    first_value = valid_response[list(valid_response)[0]]

    # dict
    if isinstance(first_value, dict):
        # try to find a definition for that first value
        definition = None
        definition_name = self.get_dict_definition(first_value)
        if definition_name is None:
            definition = self._definition_from_example(first_value)
            definition_name = 'self generated'
        for item in response.values():
            if not self.validate_definition(definition_name, item,
                                            definition=definition):
                return False
        return True

    # TODO: list
    if isinstance(first_value, list):
        raise Exception("Not implemented yet")

    # simple types
    # all values must be of that type in both valid and actual response
    try:
        assert all(isinstance(y, type(first_value))
                   for _, y in response.items())
        assert all(isinstance(y, type(first_value))
                   for _, y in valid_response.items())
        return True
    except Exception:
        return False
```
split: train
source: https://github.com/Trax-air/swagger-parser/blob/d97f962a417e76320c59c33dcb223e4373e516d5/swagger_parser/swagger_parser.py#L453-L500
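A small sketch of the simple-type branch: with int values on both sides, the final type check passes regardless of key names. The spec path is an assumption, since SwaggerParser needs a swagger file to instantiate.

```python
# Sketch of the simple-type branch; 'swagger.yaml' is an assumed spec path.
from swagger_parser import SwaggerParser

parser = SwaggerParser(swagger_path='swagger.yaml')
# Keys are ignored; only value types are compared, so this returns True.
assert parser.validate_additional_properties({'count': 1}, {'a': 2, 'b': 3})
```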
Row 1,010
repository: dslackw/slpkg
path: slpkg/sbo/network.py
function: SBoNetwork.choice_doinst
language: python

```python
def choice_doinst(self):
    """View doinst.sh file"""
    if "doinst.sh" in self.sbo_files.split():
        doinst_sh = ReadSBo(self.sbo_url).doinst("doinst.sh")
        fill = self.fill_pager(doinst_sh)
        self.pager(doinst_sh + fill)
```
split: train
source: https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/sbo/network.py#L208-L214
Row 1,011
repository: emc-openstack/storops
path: storops/unity/resource/replication_interface.py
function: UnityReplicationInterface.modify
language: python

```python
def modify(self, sp=None, ip_port=None, ip_address=None, netmask=None,
           v6_prefix_length=None, gateway=None, vlan_id=None):
    """
    Modifies a replication interface.

    :param sp: same as the one in `create` method.
    :param ip_port: same as the one in `create` method.
    :param ip_address: same as the one in `create` method.
    :param netmask: same as the one in `create` method.
    :param v6_prefix_length: same as the one in `create` method.
    :param gateway: same as the one in `create` method.
    :param vlan_id: same as the one in `create` method.
    """
    req_body = self._cli.make_body(sp=sp, ipPort=ip_port,
                                   ipAddress=ip_address, netmask=netmask,
                                   v6PrefixLength=v6_prefix_length,
                                   gateway=gateway, vlanId=vlan_id)
    resp = self.action('modify', **req_body)
    resp.raise_if_err()
    return resp
```
split: train
source: https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/replication_interface.py#L59-L79
Row 1,012
repository: dogoncouch/logdissect
path: logdissect/core.py
function: LogDissectCore.run_output
language: python

```python
def run_output(self):
    """Output finalized data"""
    for f in logdissect.output.__formats__:
        ouroutput = self.output_modules[f]
        ouroutput.write_output(self.data_set['finalized_data'],
                               args=self.args)
        del ouroutput

    # Output to terminal if silent mode is not set:
    if not self.args.silentmode:
        if self.args.verbosemode:
            print('\n==== ++++ ==== Output: ==== ++++ ====\n')
        for line in self.data_set['finalized_data']['entries']:
            print(line['raw_text'])
```
split: train
source: https://github.com/dogoncouch/logdissect/blob/426b50264cbfa9665c86df3781e1e415ba8dbbd3/logdissect/core.py#L107-L120
Row 1,013
repository: ratcave/ratcave
path: ratcave/mesh.py
function: Mesh.from_incomplete_data
language: python

```python
def from_incomplete_data(cls, vertices, normals=(), texcoords=(), **kwargs):
    """Return a Mesh with (vertices, normals, texcoords) as arrays, in that
    order. Useful for when you want a standardized array location format
    across different amounts of info in each mesh."""
    # Each guard checks the array it is about to keep (the original code
    # tested texcoords in the normals guard, which was a bug).
    normals = (normals if hasattr(normals, '__iter__') and len(normals)
               else vertutils.calculate_normals(vertices))
    texcoords = (texcoords
                 if hasattr(texcoords, '__iter__') and len(texcoords)
                 else np.zeros((vertices.shape[0], 2), dtype=np.float32))
    return cls(arrays=(vertices, normals, texcoords), **kwargs)
```
split: train
source: https://github.com/ratcave/ratcave/blob/e3862cdaba100ac2c6c78c08c4b09638e0c88fd4/ratcave/mesh.py#L185-L190
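A usage sketch: passing only vertices lets normals be computed and texcoords be zero-filled. The triangle is illustrative, and the top-level Mesh import is an assumption about how ratcave exposes the class.

```python
# Sketch: one triangle; normals get calculated, texcoords become zeros.
import numpy as np
from ratcave import Mesh  # assumed top-level export

vertices = np.array([[0., 0., 0.],
                     [1., 0., 0.],
                     [0., 1., 0.]], dtype=np.float32)
mesh = Mesh.from_incomplete_data(vertices)
```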
Row 1,014
repository: jilljenn/tryalgo
path: tryalgo/pq_tree.py
function: consecutive_ones_property
language: python

```python
def consecutive_ones_property(sets, universe=None):
    """Check the consecutive ones property.

    :param list sets: a list of subsets of the ground set.
    :param universe: the set of all elements; by default it is the union
        of the given sets.
    :returns: a list of the ordered ground set where every given set is
        consecutive, or None if there is no solution.
    :complexity: O(len(universe) * len(sets))
    :disclaimer: an optimal implementation would have complexity
        O(len(universe) + len(sets) + sum(map(len, sets))), and there are
        more recent easier algorithms for this problem.
    """
    if universe is None:
        universe = set()
        for S in sets:
            universe |= set(S)
    tree = PQ_tree(universe)
    try:
        for S in sets:
            tree.reduce(S)
        return tree.border()
    except IsNotC1P:
        return None
```
split: train
source: https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/pq_tree.py#L247-L271
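A tiny usage sketch; any valid consecutive ordering may be returned, so only the shape of the result matters here.

```python
from tryalgo.pq_tree import consecutive_ones_property

# Feasible: an ordering such as [1, 2, 3, 4] keeps every set consecutive.
print(consecutive_ones_property([{1, 2}, {2, 3}, {3, 4}]))

# Infeasible: five distinct pairs cannot all be adjacent in a line of
# four elements (only three adjacent slots exist), so this returns None.
print(consecutive_ones_property([{1, 2}, {3, 4}, {1, 3}, {2, 4}, {1, 4}]))
```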
Row 1,015
repository: sorgerlab/indra
path: indra/sources/bel/rdf_processor.py
function: BelRdfProcessor.get_activating_subs
language: python

```python
def get_activating_subs(self):
    """Extract INDRA ActiveForm Statements based on a mutation from BEL.

    The SPARQL pattern used to extract ActiveForms due to mutations looks
    for a ProteinAbundance as a subject which has a child encoding the
    amino acid substitution. The object of the statement is an
    ActivityType of the same ProteinAbundance, which is either increased
    or decreased.

    Examples:

        proteinAbundance(HGNC:NRAS,substitution(Q,61,K))
        directlyIncreases
        gtpBoundActivity(proteinAbundance(HGNC:NRAS))

        proteinAbundance(HGNC:TP53,substitution(F,134,I))
        directlyDecreases
        transcriptionalActivity(proteinAbundance(HGNC:TP53))
    """
    q_mods = prefixes + """
        SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject
        WHERE {
            ?stmt a belvoc:Statement .
            ?stmt belvoc:hasRelationship ?rel .
            ?stmt belvoc:hasSubject ?subject .
            ?stmt belvoc:hasObject ?object .
            ?subject a belvoc:ProteinAbundance .
            ?subject belvoc:hasConcept ?enzyme_name .
            ?subject belvoc:hasChild ?sub_expr .
            ?sub_expr rdfs:label ?sub_label .
            ?object a belvoc:AbundanceActivity .
            ?object belvoc:hasActivityType ?act_type .
            ?object belvoc:hasChild ?enzyme .
            ?enzyme a belvoc:ProteinAbundance .
            ?enzyme belvoc:hasConcept ?enzyme_name .
        }
        """

    # Now make the PySB for the phosphorylation
    res_mods = self.g.query(q_mods)

    for stmt in res_mods:
        evidence = self._get_evidence(stmt[4])
        # Parse out the elements of the query
        enz = self._get_agent(stmt[0], stmt[5])
        sub_expr = term_from_uri(stmt[1])
        act_type = term_from_uri(stmt[2]).lower()
        # Parse the WT and substituted residues from the node label.
        # Strangely, the RDF for substituted residue doesn't break the
        # terms of the BEL expression down into their meaning, as happens
        # for modified protein abundances. Instead, the substitution
        # just comes back as a string, e.g., "sub(V,600,E)". This code
        # parses the arguments back out using a regular expression.
        match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
        if match:
            matches = match.groups()
            wt_residue = matches[0]
            position = matches[1]
            sub_residue = matches[2]
        else:
            logger.warning("Could not parse substitution expression %s" %
                           sub_expr)
            continue
        mc = MutCondition(position, wt_residue, sub_residue)
        enz.mutations = [mc]
        rel = strip_statement(stmt[3])
        if rel == 'DirectlyDecreases':
            is_active = False
        else:
            is_active = True
        stmt_str = strip_statement(stmt[4])
        # Mark this as a converted statement
        self.converted_direct_stmts.append(stmt_str)
        st = ActiveForm(enz, act_type, is_active, evidence)
        self.statements.append(st)
```
split: train
source: https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L346-L421
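The substitution-parsing step above is plain stdlib regex and can be checked in isolation:

```python
# Standalone check of the substitution regex used in get_activating_subs.
import re

match = re.match(r'sub\(([A-Z]),([0-9]*),([A-Z])\)', 'sub(V,600,E)')
print(match.groups())  # ('V', '600', 'E'): wild type, position, substitution
```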
Row 1,016
repository: eandersson/amqpstorm
path: amqpstorm/exchange.py
function: Exchange.bind
language: python

```python
def bind(self, destination='', source='', routing_key='', arguments=None):
    """Bind an Exchange.

    :param str destination: Exchange name
    :param str source: Exchange to bind to
    :param str routing_key: The routing key to use
    :param dict arguments: Bind key/value arguments

    :raises AMQPInvalidArgument: Invalid Parameters
    :raises AMQPChannelError: Raises if the channel encountered an error.
    :raises AMQPConnectionError: Raises if the connection encountered
        an error.

    :rtype: dict
    """
    if not compatibility.is_string(destination):
        raise AMQPInvalidArgument('destination should be a string')
    elif not compatibility.is_string(source):
        raise AMQPInvalidArgument('source should be a string')
    elif not compatibility.is_string(routing_key):
        raise AMQPInvalidArgument('routing_key should be a string')
    elif arguments is not None and not isinstance(arguments, dict):
        raise AMQPInvalidArgument('arguments should be a dict or None')

    bind_frame = pamqp_exchange.Bind(destination=destination,
                                     source=source,
                                     routing_key=routing_key,
                                     arguments=arguments)
    return self._channel.rpc_request(bind_frame)
```
def bind(self, destination='', source='', routing_key='', arguments=None): """Bind an Exchange. :param str destination: Exchange name :param str source: Exchange to bind to :param str routing_key: The routing key to use :param dict arguments: Bind key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict """ if not compatibility.is_string(destination): raise AMQPInvalidArgument('destination should be a string') elif not compatibility.is_string(source): raise AMQPInvalidArgument('source should be a string') elif not compatibility.is_string(routing_key): raise AMQPInvalidArgument('routing_key should be a string') elif arguments is not None and not isinstance(arguments, dict): raise AMQPInvalidArgument('arguments should be a dict or None') bind_frame = pamqp_exchange.Bind(destination=destination, source=source, routing_key=routing_key, arguments=arguments) return self._channel.rpc_request(bind_frame)
['def', 'bind', '(', 'self', ',', 'destination', '=', "''", ',', 'source', '=', "''", ',', 'routing_key', '=', "''", ',', 'arguments', '=', 'None', ')', ':', 'if', 'not', 'compatibility', '.', 'is_string', '(', 'destination', ')', ':', 'raise', 'AMQPInvalidArgument', '(', "'destination should be a string'", ')', 'elif', 'not', 'compatibility', '.', 'is_string', '(', 'source', ')', ':', 'raise', 'AMQPInvalidArgument', '(', "'source should be a string'", ')', 'elif', 'not', 'compatibility', '.', 'is_string', '(', 'routing_key', ')', ':', 'raise', 'AMQPInvalidArgument', '(', "'routing_key should be a string'", ')', 'elif', 'arguments', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'arguments', ',', 'dict', ')', ':', 'raise', 'AMQPInvalidArgument', '(', "'arguments should be a dict or None'", ')', 'bind_frame', '=', 'pamqp_exchange', '.', 'Bind', '(', 'destination', '=', 'destination', ',', 'source', '=', 'source', ',', 'routing_key', '=', 'routing_key', ',', 'arguments', '=', 'arguments', ')', 'return', 'self', '.', '_channel', '.', 'rpc_request', '(', 'bind_frame', ')']
Bind an Exchange. :param str destination: Exchange name :param str source: Exchange to bind to :param str routing_key: The routing key to use :param dict arguments: Bind key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict
['Bind', 'an', 'Exchange', '.']
train
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/exchange.py#L77-L106
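A hedged usage sketch for Exchange.bind above. It assumes a RabbitMQ broker on localhost with the default guest account, and the exchange names are invented for the demo; the with-statement style follows the amqpstorm examples.

import amqpstorm

with amqpstorm.Connection('localhost', 'guest', 'guest') as connection:
    with connection.channel() as channel:
        # Declare two exchanges, then forward everything published to
        # 'upstream' into 'downstream' via an exchange-to-exchange binding.
        channel.exchange.declare(exchange='upstream', exchange_type='fanout')
        channel.exchange.declare(exchange='downstream', exchange_type='fanout')
        channel.exchange.bind(destination='downstream', source='upstream',
                              routing_key='')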
1,017
ga4gh/ga4gh-server
ga4gh/server/datamodel/variants.py
HtslibVariantSet.getVariants
def getVariants(self, referenceName, startPosition, endPosition, callSetIds=[]): """ Returns an iterator over the specified variants. The parameters correspond to the attributes of a GASearchVariantsRequest object. """ if callSetIds is None: callSetIds = self._callSetIds else: for callSetId in callSetIds: if callSetId not in self._callSetIds: raise exceptions.CallSetNotInVariantSetException( callSetId, self.getId()) for record in self.getPysamVariants( referenceName, startPosition, endPosition): yield self.convertVariant(record, callSetIds)
python
def getVariants(self, referenceName, startPosition, endPosition, callSetIds=[]): """ Returns an iterator over the specified variants. The parameters correspond to the attributes of a GASearchVariantsRequest object. """ if callSetIds is None: callSetIds = self._callSetIds else: for callSetId in callSetIds: if callSetId not in self._callSetIds: raise exceptions.CallSetNotInVariantSetException( callSetId, self.getId()) for record in self.getPysamVariants( referenceName, startPosition, endPosition): yield self.convertVariant(record, callSetIds)
['def', 'getVariants', '(', 'self', ',', 'referenceName', ',', 'startPosition', ',', 'endPosition', ',', 'callSetIds', '=', '[', ']', ')', ':', 'if', 'callSetIds', 'is', 'None', ':', 'callSetIds', '=', 'self', '.', '_callSetIds', 'else', ':', 'for', 'callSetId', 'in', 'callSetIds', ':', 'if', 'callSetId', 'not', 'in', 'self', '.', '_callSetIds', ':', 'raise', 'exceptions', '.', 'CallSetNotInVariantSetException', '(', 'callSetId', ',', 'self', '.', 'getId', '(', ')', ')', 'for', 'record', 'in', 'self', '.', 'getPysamVariants', '(', 'referenceName', ',', 'startPosition', ',', 'endPosition', ')', ':', 'yield', 'self', '.', 'convertVariant', '(', 'record', ',', 'callSetIds', ')']
Returns an iterator over the specified variants. The parameters correspond to the attributes of a GASearchVariantsRequest object.
['Returns', 'an', 'iterator', 'over', 'the', 'specified', 'variants', '.', 'The', 'parameters', 'correspond', 'to', 'the', 'attributes', 'of', 'a', 'GASearchVariantsRequest', 'object', '.']
train
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/variants.py#L749-L764
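One subtlety in getVariants above: None and the default empty list mean different things (all call sets versus none). A toy sketch of that contract, with a hypothetical TinyVariantSet standing in for HtslibVariantSet:

class TinyVariantSet:
    def __init__(self):
        self._callSetIds = ['cs1', 'cs2']

    def resolveCallSets(self, callSetIds=[]):
        # Mirrors the branch logic above: None selects every call set,
        # while the default [] requests variants with no call data.
        if callSetIds is None:
            return list(self._callSetIds)
        for callSetId in callSetIds:
            if callSetId not in self._callSetIds:
                raise KeyError(callSetId)
        return callSetIds

vs = TinyVariantSet()
print(vs.resolveCallSets(None))     # ['cs1', 'cs2'] - all call sets
print(vs.resolveCallSets())         # [] - no call data requested
print(vs.resolveCallSets(['cs1']))  # ['cs1'] - validated subset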
1,018
Tenchi2xh/Almonds
almonds/mandelbrot.py
zoom
def zoom(params, factor): """ Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom) """ params.zoom /= factor n_x = params.mb_cx / params.zoom n_y = params.mb_cy / params.zoom params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2 params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2
python
def zoom(params, factor): """ Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom) """ params.zoom /= factor n_x = params.mb_cx / params.zoom n_y = params.mb_cy / params.zoom params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2 params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2
['def', 'zoom', '(', 'params', ',', 'factor', ')', ':', 'params', '.', 'zoom', '/=', 'factor', 'n_x', '=', 'params', '.', 'mb_cx', '/', 'params', '.', 'zoom', 'n_y', '=', 'params', '.', 'mb_cy', '/', 'params', '.', 'zoom', 'params', '.', 'plane_x0', '=', 'int', '(', '(', 'n_x', '+', '1.0', ')', '*', 'params', '.', 'plane_w', '/', '(', '2.0', '*', 'params', '.', 'plane_ratio', ')', ')', '-', 'params', '.', 'plane_w', '//', '2', 'params', '.', 'plane_y0', '=', 'int', '(', '(', 'n_y', '+', '1.0', ')', '*', 'params', '.', 'plane_h', '/', '2.0', ')', '-', 'params', '.', 'plane_h', '//', '2']
Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom)
['Applies', 'a', 'zoom', 'on', 'the', 'current', 'parameters', '.']
train
https://github.com/Tenchi2xh/Almonds/blob/6b27024729f055f2cb5e14ae3ca3cb428ae054bc/almonds/mandelbrot.py#L117-L132
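A worked example of the zoom arithmetic above, using types.SimpleNamespace as a stand-in for the real Params object; the field values chosen here are illustrative assumptions.

from types import SimpleNamespace

def zoom(params, factor):
    # Same arithmetic as above: shrink the zoom ratio, then recompute the
    # top-left plane coordinates from the Mandelbrot-space center.
    params.zoom /= factor
    n_x = params.mb_cx / params.zoom
    n_y = params.mb_cy / params.zoom
    params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2
    params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2

params = SimpleNamespace(zoom=1.0, mb_cx=-0.5, mb_cy=0.0,
                         plane_w=80, plane_h=40, plane_ratio=2.0)
zoom(params, 2.0)  # bigger factor, more zoom: the ratio halves
print(params.zoom, params.plane_x0, params.plane_y0)  # 0.5 -40 0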
1,019
materialsproject/pymatgen
pymatgen/util/plotting.py
pretty_polyfit_plot
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs): """ Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object. """ plt = pretty_plot(**kwargs) pp = np.polyfit(x, y, deg) xp = np.linspace(min(x), max(x), 200) plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o') if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) return plt
python
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs): """ Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object. """ plt = pretty_plot(**kwargs) pp = np.polyfit(x, y, deg) xp = np.linspace(min(x), max(x), 200) plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o') if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) return plt
['def', 'pretty_polyfit_plot', '(', 'x', ',', 'y', ',', 'deg', '=', '1', ',', 'xlabel', '=', 'None', ',', 'ylabel', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'plt', '=', 'pretty_plot', '(', '*', '*', 'kwargs', ')', 'pp', '=', 'np', '.', 'polyfit', '(', 'x', ',', 'y', ',', 'deg', ')', 'xp', '=', 'np', '.', 'linspace', '(', 'min', '(', 'x', ')', ',', 'max', '(', 'x', ')', ',', '200', ')', 'plt', '.', 'plot', '(', 'xp', ',', 'np', '.', 'polyval', '(', 'pp', ',', 'xp', ')', ',', "'k--'", ',', 'x', ',', 'y', ',', "'o'", ')', 'if', 'xlabel', ':', 'plt', '.', 'xlabel', '(', 'xlabel', ')', 'if', 'ylabel', ':', 'plt', '.', 'ylabel', '(', 'ylabel', ')', 'return', 'plt']
Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object.
['Convenience', 'method', 'to', 'plot', 'data', 'with', 'trend', 'lines', 'based', 'on', 'polynomial', 'fit', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/plotting.py#L157-L180
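A usage sketch for pretty_polyfit_plot, assuming it is importable from pymatgen.util.plotting as the file path suggests; the data are synthetic.

import numpy as np
from pymatgen.util.plotting import pretty_polyfit_plot

# Noisy quadratic data; a degree-2 fit should recover the dashed trend line.
x = np.linspace(0, 10, 25)
y = 0.5 * x ** 2 - 2 * x + 1 + np.random.normal(scale=0.5, size=x.size)

plt = pretty_polyfit_plot(x, y, deg=2, xlabel='x', ylabel='y')
plt.show()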
1,020
Kortemme-Lab/klab
klab/bio/sifts.py
SIFTS.retrieve
def retrieve(pdb_id, cache_dir = None, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = True, bio_cache = None): '''Creates a PDBML object by using a cached copy of the files if they exists or by retrieving the files from the RCSB. bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly. ''' pdb_contents = None xml_contents = None pdb_id = pdb_id.upper() l_pdb_id = pdb_id.lower() if len(pdb_id) != 4 or not pdb_id.isalnum(): raise Exception("Bad PDB identifier '%s'." % pdb_id) if bio_cache: pdb_contents = bio_cache.get_pdb_contents(pdb_id) xml_contents = bio_cache.get_sifts_xml_contents(pdb_id) if cache_dir: if not pdb_contents: # Check to see whether we have a cached copy of the PDB file filename = os.path.join(cache_dir, "%s.pdb" % pdb_id) if os.path.exists(filename): pdb_contents = read_file(filename) if not xml_contents: # Check to see whether we have a cached copy of the XML file filename = os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id) if os.path.exists(filename): xml_contents = read_file(filename) # Get any missing files from the RCSB and create cached copies if appropriate if not pdb_contents: pdb_contents = rcsb.retrieve_pdb(pdb_id) if cache_dir: write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents) if not xml_contents: try: xml_contents = retrieve_xml(pdb_id, silent = False) if cache_dir: write_file(os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id), xml_contents) except FTPException550: raise MissingSIFTSRecord('The file "%s.sifts.xml.gz" could not be found on the EBI FTP server.' % l_pdb_id) xml_contents = xml_contents # Return the object handler = SIFTS(xml_contents, pdb_contents, acceptable_sequence_percentage_match = acceptable_sequence_percentage_match, cache_dir = cache_dir, require_uniprot_residue_mapping = require_uniprot_residue_mapping, bio_cache = bio_cache, pdb_id = pdb_id) xml.sax.parseString(xml_contents, handler) return handler
python
def retrieve(pdb_id, cache_dir = None, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = True, bio_cache = None): '''Creates a PDBML object by using a cached copy of the files if they exists or by retrieving the files from the RCSB. bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly. ''' pdb_contents = None xml_contents = None pdb_id = pdb_id.upper() l_pdb_id = pdb_id.lower() if len(pdb_id) != 4 or not pdb_id.isalnum(): raise Exception("Bad PDB identifier '%s'." % pdb_id) if bio_cache: pdb_contents = bio_cache.get_pdb_contents(pdb_id) xml_contents = bio_cache.get_sifts_xml_contents(pdb_id) if cache_dir: if not pdb_contents: # Check to see whether we have a cached copy of the PDB file filename = os.path.join(cache_dir, "%s.pdb" % pdb_id) if os.path.exists(filename): pdb_contents = read_file(filename) if not xml_contents: # Check to see whether we have a cached copy of the XML file filename = os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id) if os.path.exists(filename): xml_contents = read_file(filename) # Get any missing files from the RCSB and create cached copies if appropriate if not pdb_contents: pdb_contents = rcsb.retrieve_pdb(pdb_id) if cache_dir: write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents) if not xml_contents: try: xml_contents = retrieve_xml(pdb_id, silent = False) if cache_dir: write_file(os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id), xml_contents) except FTPException550: raise MissingSIFTSRecord('The file "%s.sifts.xml.gz" could not be found on the EBI FTP server.' % l_pdb_id) xml_contents = xml_contents # Return the object handler = SIFTS(xml_contents, pdb_contents, acceptable_sequence_percentage_match = acceptable_sequence_percentage_match, cache_dir = cache_dir, require_uniprot_residue_mapping = require_uniprot_residue_mapping, bio_cache = bio_cache, pdb_id = pdb_id) xml.sax.parseString(xml_contents, handler) return handler
['def', 'retrieve', '(', 'pdb_id', ',', 'cache_dir', '=', 'None', ',', 'acceptable_sequence_percentage_match', '=', '70.0', ',', 'require_uniprot_residue_mapping', '=', 'True', ',', 'bio_cache', '=', 'None', ')', ':', 'pdb_contents', '=', 'None', 'xml_contents', '=', 'None', 'pdb_id', '=', 'pdb_id', '.', 'upper', '(', ')', 'l_pdb_id', '=', 'pdb_id', '.', 'lower', '(', ')', 'if', 'len', '(', 'pdb_id', ')', '!=', '4', 'or', 'not', 'pdb_id', '.', 'isalnum', '(', ')', ':', 'raise', 'Exception', '(', '"Bad PDB identifier \'%s\'."', '%', 'pdb_id', ')', 'if', 'bio_cache', ':', 'pdb_contents', '=', 'bio_cache', '.', 'get_pdb_contents', '(', 'pdb_id', ')', 'xml_contents', '=', 'bio_cache', '.', 'get_sifts_xml_contents', '(', 'pdb_id', ')', 'if', 'cache_dir', ':', 'if', 'not', 'pdb_contents', ':', '# Check to see whether we have a cached copy of the PDB file', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'cache_dir', ',', '"%s.pdb"', '%', 'pdb_id', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'pdb_contents', '=', 'read_file', '(', 'filename', ')', 'if', 'not', 'xml_contents', ':', '# Check to see whether we have a cached copy of the XML file', 'filename', '=', 'os', '.', 'path', '.', 'join', '(', 'cache_dir', ',', '"%s.sifts.xml.gz"', '%', 'l_pdb_id', ')', 'if', 'os', '.', 'path', '.', 'exists', '(', 'filename', ')', ':', 'xml_contents', '=', 'read_file', '(', 'filename', ')', '# Get any missing files from the RCSB and create cached copies if appropriate', 'if', 'not', 'pdb_contents', ':', 'pdb_contents', '=', 'rcsb', '.', 'retrieve_pdb', '(', 'pdb_id', ')', 'if', 'cache_dir', ':', 'write_file', '(', 'os', '.', 'path', '.', 'join', '(', 'cache_dir', ',', '"%s.pdb"', '%', 'pdb_id', ')', ',', 'pdb_contents', ')', 'if', 'not', 'xml_contents', ':', 'try', ':', 'xml_contents', '=', 'retrieve_xml', '(', 'pdb_id', ',', 'silent', '=', 'False', ')', 'if', 'cache_dir', ':', 'write_file', '(', 'os', '.', 'path', '.', 'join', '(', 'cache_dir', ',', '"%s.sifts.xml.gz"', '%', 'l_pdb_id', ')', ',', 'xml_contents', ')', 'except', 'FTPException550', ':', 'raise', 'MissingSIFTSRecord', '(', '\'The file "%s.sifts.xml.gz" could not be found on the EBI FTP server.\'', '%', 'l_pdb_id', ')', 'xml_contents', '=', 'xml_contents', '# Return the object', 'handler', '=', 'SIFTS', '(', 'xml_contents', ',', 'pdb_contents', ',', 'acceptable_sequence_percentage_match', '=', 'acceptable_sequence_percentage_match', ',', 'cache_dir', '=', 'cache_dir', ',', 'require_uniprot_residue_mapping', '=', 'require_uniprot_residue_mapping', ',', 'bio_cache', '=', 'bio_cache', ',', 'pdb_id', '=', 'pdb_id', ')', 'xml', '.', 'sax', '.', 'parseString', '(', 'xml_contents', ',', 'handler', ')', 'return', 'handler']
Creates a PDBML object by using a cached copy of the files if they exists or by retrieving the files from the RCSB. bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly.
['Creates', 'a', 'PDBML', 'object', 'by', 'using', 'a', 'cached', 'copy', 'of', 'the', 'files', 'if', 'they', 'exists', 'or', 'by', 'retrieving', 'the', 'files', 'from', 'the', 'RCSB', '.', 'bio_cache', 'should', 'be', 'a', 'klab', '.', 'bio', '.', 'cache', '.', 'py', '::', 'BioCache', 'object', 'and', 'is', 'used', 'to', 'avoid', 'reading', '/', 'downloading', 'cached', 'files', 'repeatedly', '.']
train
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/sifts.py#L325-L375
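retrieve above interleaves two copies of the same cache-then-fetch pattern (one for the PDB file, one for the SIFTS XML). A minimal generic sketch of that pattern, with a hypothetical cached_fetch helper:

import os

def cached_fetch(cache_dir, filename, fetch):
    # Return a cached copy if one exists; otherwise call fetch() and
    # write the result into the cache for next time.
    path = os.path.join(cache_dir, filename) if cache_dir else None
    if path and os.path.exists(path):
        with open(path) as handle:
            return handle.read()
    contents = fetch()
    if path:
        with open(path, 'w') as handle:
            handle.write(contents)
    return contents

# e.g. pdb_contents = cached_fetch(cache_dir, '1ABC.pdb',
#                                  lambda: rcsb.retrieve_pdb('1ABC'))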
1,021
Erotemic/utool
utool/util_type.py
get_type
def get_type(var): """ Gets types accounting for numpy Ignore: import utool as ut import pandas as pd var = np.array(['a', 'b', 'c']) ut.get_type(var) var = pd.Index(['a', 'b', 'c']) ut.get_type(var) """ if HAVE_NUMPY and isinstance(var, np.ndarray): if _WIN32: # This is a weird system specific error # https://github.com/numpy/numpy/issues/3667 type_ = var.dtype else: type_ = var.dtype.type elif HAVE_PANDAS and isinstance(var, pd.Index): if _WIN32: type_ = var.dtype else: type_ = var.dtype.type else: type_ = type(var) return type_
python
def get_type(var): """ Gets types accounting for numpy Ignore: import utool as ut import pandas as pd var = np.array(['a', 'b', 'c']) ut.get_type(var) var = pd.Index(['a', 'b', 'c']) ut.get_type(var) """ if HAVE_NUMPY and isinstance(var, np.ndarray): if _WIN32: # This is a weird system specific error # https://github.com/numpy/numpy/issues/3667 type_ = var.dtype else: type_ = var.dtype.type elif HAVE_PANDAS and isinstance(var, pd.Index): if _WIN32: type_ = var.dtype else: type_ = var.dtype.type else: type_ = type(var) return type_
['def', 'get_type', '(', 'var', ')', ':', 'if', 'HAVE_NUMPY', 'and', 'isinstance', '(', 'var', ',', 'np', '.', 'ndarray', ')', ':', 'if', '_WIN32', ':', '# This is a weird system specific error', '# https://github.com/numpy/numpy/issues/3667', 'type_', '=', 'var', '.', 'dtype', 'else', ':', 'type_', '=', 'var', '.', 'dtype', '.', 'type', 'elif', 'HAVE_PANDAS', 'and', 'isinstance', '(', 'var', ',', 'pd', '.', 'Index', ')', ':', 'if', '_WIN32', ':', 'type_', '=', 'var', '.', 'dtype', 'else', ':', 'type_', '=', 'var', '.', 'dtype', '.', 'type', 'else', ':', 'type_', '=', 'type', '(', 'var', ')', 'return', 'type_']
Gets types accounting for numpy Ignore: import utool as ut import pandas as pd var = np.array(['a', 'b', 'c']) ut.get_type(var) var = pd.Index(['a', 'b', 'c']) ut.get_type(var)
['Gets', 'types', 'accounting', 'for', 'numpy']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_type.py#L377-L403
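A quick demonstration of why get_type reaches for dtype.type on numpy containers; outputs shown are for a typical Python 3 / numpy setup.

import numpy as np

arr = np.array(['a', 'b', 'c'])
print(type(arr))        # <class 'numpy.ndarray'> - says nothing about elements
print(arr.dtype.type)   # <class 'numpy.str_'>   - what get_type returns

nums = np.arange(3, dtype=np.float64)
print(nums.dtype.type)  # <class 'numpy.float64'>
print(type(42))         # <class 'int'> - plain objects fall through to type()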
1,022
matthewgilbert/mapping
mapping/mappings.py
static_transition
def static_transition(timestamp, contract_dates, transition, holidays=None, validate_inputs=True): """ An implementation of *get_weights* parameter in roller(). Return weights to tradeable instruments for a given date based on a transition DataFrame which indicates how to roll through the roll period. Parameters ---------- timestamp: pandas.Timestamp The timestamp to return instrument weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. transition: pandas.DataFrame A DataFrame with an index of integers representing business day offsets from the last roll date and a column which is a MultiIndex where the top level is generic instruments and the second level is ['front', 'back'] which refer to the front month contract and the back month contract of the roll. Note that for different generics, e.g. CL1, CL2, the front and back month contract during a roll would refer to different underlying instruments. The values represent the fraction of the roll on each day during the roll period. The first row of the transition period should be completely allocated to the front contract and the last row should be completely allocated to the back contract. holidays: array_like of datetime64[D] Holidays to exclude when calculating business day offsets from the last roll date. See numpy.busday_count. validate_inputs: Boolean Whether or not to validate ordering of contract_dates and transition. **Caution** this is provided for speed however if this is set to False and inputs are not defined properly algorithm may return incorrect data. Returns ------- A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], ... index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.Timestamp('2016-10-19') >>> wts = mappings.static_transition(ts, contract_dates, transition) """ if validate_inputs: # required for MultiIndex slicing _check_static(transition.sort_index(axis=1)) # the algorithm below will return invalid results if contract_dates is # not as expected so better to fail explicitly _check_contract_dates(contract_dates) if not holidays: holidays = [] # further speedup can be obtained using contract_dates.loc[timestamp:] # but this requires swapping contract_dates index and values after_contract_dates = contract_dates.loc[contract_dates >= timestamp] contracts = after_contract_dates.index front_expiry_dt = after_contract_dates.iloc[0] days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(), holidays=holidays) name2num = dict(zip(transition.columns.levels[0], range(len(transition.columns.levels[0])))) if days_to_expiry in transition.index: weights_iter = transition.loc[days_to_expiry].iteritems() # roll hasn't started yet elif days_to_expiry < transition.index.min(): # provides significant speedup over transition.iloc[0].iteritems() vals = transition.values[0] weights_iter = zip(transition.columns.tolist(), vals) # roll is finished else: vals = transition.values[-1] weights_iter = zip(transition.columns.tolist(), vals) cwts = [] for idx_tuple, weighting in weights_iter: gen_name, position = idx_tuple if weighting != 0: if position == "front": cntrct_idx = name2num[gen_name] elif position == "back": cntrct_idx = name2num[gen_name] + 1 try: cntrct_name = contracts[cntrct_idx] except IndexError as e: raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} " "resulting from {3} mapping") .format(cntrct_idx, after_contract_dates, timestamp, idx_tuple) ).with_traceback(sys.exc_info()[2]) cwts.append((gen_name, cntrct_name, weighting, timestamp)) return cwts
python
def static_transition(timestamp, contract_dates, transition, holidays=None, validate_inputs=True): """ An implementation of *get_weights* parameter in roller(). Return weights to tradeable instruments for a given date based on a transition DataFrame which indicates how to roll through the roll period. Parameters ---------- timestamp: pandas.Timestamp The timestamp to return instrument weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. transition: pandas.DataFrame A DataFrame with an index of integers representing business day offsets from the last roll date and a column which is a MultiIndex where the top level is generic instruments and the second level is ['front', 'back'] which refer to the front month contract and the back month contract of the roll. Note that for different generics, e.g. CL1, CL2, the front and back month contract during a roll would refer to different underlying instruments. The values represent the fraction of the roll on each day during the roll period. The first row of the transition period should be completely allocated to the front contract and the last row should be completely allocated to the back contract. holidays: array_like of datetime64[D] Holidays to exclude when calculating business day offsets from the last roll date. See numpy.busday_count. validate_inputs: Boolean Whether or not to validate ordering of contract_dates and transition. **Caution** this is provided for speed however if this is set to False and inputs are not defined properly algorithm may return incorrect data. Returns ------- A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], ... index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.Timestamp('2016-10-19') >>> wts = mappings.static_transition(ts, contract_dates, transition) """ if validate_inputs: # required for MultiIndex slicing _check_static(transition.sort_index(axis=1)) # the algorithm below will return invalid results if contract_dates is # not as expected so better to fail explicitly _check_contract_dates(contract_dates) if not holidays: holidays = [] # further speedup can be obtained using contract_dates.loc[timestamp:] # but this requires swapping contract_dates index and values after_contract_dates = contract_dates.loc[contract_dates >= timestamp] contracts = after_contract_dates.index front_expiry_dt = after_contract_dates.iloc[0] days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(), holidays=holidays) name2num = dict(zip(transition.columns.levels[0], range(len(transition.columns.levels[0])))) if days_to_expiry in transition.index: weights_iter = transition.loc[days_to_expiry].iteritems() # roll hasn't started yet elif days_to_expiry < transition.index.min(): # provides significant speedup over transition.iloc[0].iteritems() vals = transition.values[0] weights_iter = zip(transition.columns.tolist(), vals) # roll is finished else: vals = transition.values[-1] weights_iter = zip(transition.columns.tolist(), vals) cwts = [] for idx_tuple, weighting in weights_iter: gen_name, position = idx_tuple if weighting != 0: if position == "front": cntrct_idx = name2num[gen_name] elif position == "back": cntrct_idx = name2num[gen_name] + 1 try: cntrct_name = contracts[cntrct_idx] except IndexError as e: raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} " "resulting from {3} mapping") .format(cntrct_idx, after_contract_dates, timestamp, idx_tuple) ).with_traceback(sys.exc_info()[2]) cwts.append((gen_name, cntrct_name, weighting, timestamp)) return cwts
['def', 'static_transition', '(', 'timestamp', ',', 'contract_dates', ',', 'transition', ',', 'holidays', '=', 'None', ',', 'validate_inputs', '=', 'True', ')', ':', 'if', 'validate_inputs', ':', '# required for MultiIndex slicing', '_check_static', '(', 'transition', '.', 'sort_index', '(', 'axis', '=', '1', ')', ')', '# the algorithm below will return invalid results if contract_dates is', '# not as expected so better to fail explicitly', '_check_contract_dates', '(', 'contract_dates', ')', 'if', 'not', 'holidays', ':', 'holidays', '=', '[', ']', '# further speedup can be obtained using contract_dates.loc[timestamp:]', '# but this requires swapping contract_dates index and values', 'after_contract_dates', '=', 'contract_dates', '.', 'loc', '[', 'contract_dates', '>=', 'timestamp', ']', 'contracts', '=', 'after_contract_dates', '.', 'index', 'front_expiry_dt', '=', 'after_contract_dates', '.', 'iloc', '[', '0', ']', 'days_to_expiry', '=', 'np', '.', 'busday_count', '(', 'front_expiry_dt', '.', 'date', '(', ')', ',', 'timestamp', '.', 'date', '(', ')', ',', 'holidays', '=', 'holidays', ')', 'name2num', '=', 'dict', '(', 'zip', '(', 'transition', '.', 'columns', '.', 'levels', '[', '0', ']', ',', 'range', '(', 'len', '(', 'transition', '.', 'columns', '.', 'levels', '[', '0', ']', ')', ')', ')', ')', 'if', 'days_to_expiry', 'in', 'transition', '.', 'index', ':', 'weights_iter', '=', 'transition', '.', 'loc', '[', 'days_to_expiry', ']', '.', 'iteritems', '(', ')', "# roll hasn't started yet", 'elif', 'days_to_expiry', '<', 'transition', '.', 'index', '.', 'min', '(', ')', ':', '# provides significant speedup over transition.iloc[0].iteritems()', 'vals', '=', 'transition', '.', 'values', '[', '0', ']', 'weights_iter', '=', 'zip', '(', 'transition', '.', 'columns', '.', 'tolist', '(', ')', ',', 'vals', ')', '# roll is finished', 'else', ':', 'vals', '=', 'transition', '.', 'values', '[', '-', '1', ']', 'weights_iter', '=', 'zip', '(', 'transition', '.', 'columns', '.', 'tolist', '(', ')', ',', 'vals', ')', 'cwts', '=', '[', ']', 'for', 'idx_tuple', ',', 'weighting', 'in', 'weights_iter', ':', 'gen_name', ',', 'position', '=', 'idx_tuple', 'if', 'weighting', '!=', '0', ':', 'if', 'position', '==', '"front"', ':', 'cntrct_idx', '=', 'name2num', '[', 'gen_name', ']', 'elif', 'position', '==', '"back"', ':', 'cntrct_idx', '=', 'name2num', '[', 'gen_name', ']', '+', '1', 'try', ':', 'cntrct_name', '=', 'contracts', '[', 'cntrct_idx', ']', 'except', 'IndexError', 'as', 'e', ':', 'raise', 'type', '(', 'e', ')', '(', '(', '"index {0} is out of bounds in\\n{1}\\nas of {2} "', '"resulting from {3} mapping"', ')', '.', 'format', '(', 'cntrct_idx', ',', 'after_contract_dates', ',', 'timestamp', ',', 'idx_tuple', ')', ')', '.', 'with_traceback', '(', 'sys', '.', 'exc_info', '(', ')', '[', '2', ']', ')', 'cwts', '.', 'append', '(', '(', 'gen_name', ',', 'cntrct_name', ',', 'weighting', ',', 'timestamp', ')', ')', 'return', 'cwts']
An implementation of *get_weights* parameter in roller(). Return weights to tradeable instruments for a given date based on a transition DataFrame which indicates how to roll through the roll period. Parameters ---------- timestamp: pandas.Timestamp The timestamp to return instrument weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. transition: pandas.DataFrame A DataFrame with an index of integers representing business day offsets from the last roll date and a column which is a MultiIndex where the top level is generic instruments and the second level is ['front', 'back'] which refer to the front month contract and the back month contract of the roll. Note that for different generics, e.g. CL1, CL2, the front and back month contract during a roll would refer to different underlying instruments. The values represent the fraction of the roll on each day during the roll period. The first row of the transition period should be completely allocated to the front contract and the last row should be completely allocated to the back contract. holidays: array_like of datetime64[D] Holidays to exclude when calculating business day offsets from the last roll date. See numpy.busday_count. validate_inputs: Boolean Whether or not to validate ordering of contract_dates and transition. **Caution** this is provided for speed however if this is set to False and inputs are not defined properly algorithm may return incorrect data. Returns ------- A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], ... index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.Timestamp('2016-10-19') >>> wts = mappings.static_transition(ts, contract_dates, transition)
['An', 'implementation', 'of', '*', 'get_weights', '*', 'parameter', 'in', 'roller', '()', '.', 'Return', 'weights', 'to', 'tradeable', 'instruments', 'for', 'a', 'given', 'date', 'based', 'on', 'a', 'transition', 'DataFrame', 'which', 'indicates', 'how', 'to', 'roll', 'through', 'the', 'roll', 'period', '.']
train
https://github.com/matthewgilbert/mapping/blob/24ea21acfe37a0ee273f63a273b5d24ea405e70d/mapping/mappings.py#L174-L282
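A quick check of the offset convention used above: np.busday_count is negative before the roll date, which is how the timestamp lines up with the negative transition.index values in the docstring example.

import numpy as np
import pandas as pd

front_expiry = pd.Timestamp('2016-10-20')
ts = pd.Timestamp('2016-10-19')
# One business day before the roll date -> offset -1, a row of transition.
print(np.busday_count(front_expiry.date(), ts.date()))  # -1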
1,023
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
Misc.loglikelihood
def loglikelihood(self, x, previous=False): """return log-likelihood of `x` regarding the current sample distribution""" # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled) # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim # TODO: test this!! # c=cma.fmin... # c[3]['cma'].loglikelihood(...) if previous and hasattr(self, 'lastiter'): sigma = self.lastiter.sigma Crootinv = self.lastiter._Crootinv xmean = self.lastiter.mean D = self.lastiter.D elif previous and self.countiter > 1: raise _Error('no previous distribution parameters stored, check options importance_mixing') else: sigma = self.sigma Crootinv = self._Crootinv xmean = self.mean D = self.D dx = array(x) - xmean # array(x) - array(m) n = self.N logs2pi = n * log(2 * np.pi) / 2. logdetC = 2 * sum(log(D)) dx = np.dot(Crootinv, dx) res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma) if 1 < 3: # testing s2pi = (2 * np.pi)**(n / 2.) detC = np.prod(D)**2 res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n) assert res2 < res + 1e-8 or res2 > res - 1e-8 return res
python
def loglikelihood(self, x, previous=False): """return log-likelihood of `x` regarding the current sample distribution""" # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled) # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim # TODO: test this!! # c=cma.fmin... # c[3]['cma'].loglikelihood(...) if previous and hasattr(self, 'lastiter'): sigma = self.lastiter.sigma Crootinv = self.lastiter._Crootinv xmean = self.lastiter.mean D = self.lastiter.D elif previous and self.countiter > 1: raise _Error('no previous distribution parameters stored, check options importance_mixing') else: sigma = self.sigma Crootinv = self._Crootinv xmean = self.mean D = self.D dx = array(x) - xmean # array(x) - array(m) n = self.N logs2pi = n * log(2 * np.pi) / 2. logdetC = 2 * sum(log(D)) dx = np.dot(Crootinv, dx) res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma) if 1 < 3: # testing s2pi = (2 * np.pi)**(n / 2.) detC = np.prod(D)**2 res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n) assert res2 < res + 1e-8 or res2 > res - 1e-8 return res
['def', 'loglikelihood', '(', 'self', ',', 'x', ',', 'previous', '=', 'False', ')', ':', '# testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformely sampled)', '# for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim', '# TODO: test this!!', '# c=cma.fmin...', "# c[3]['cma'].loglikelihood(...)", 'if', 'previous', 'and', 'hasattr', '(', 'self', ',', "'lastiter'", ')', ':', 'sigma', '=', 'self', '.', 'lastiter', '.', 'sigma', 'Crootinv', '=', 'self', '.', 'lastiter', '.', '_Crootinv', 'xmean', '=', 'self', '.', 'lastiter', '.', 'mean', 'D', '=', 'self', '.', 'lastiter', '.', 'D', 'elif', 'previous', 'and', 'self', '.', 'countiter', '>', '1', ':', 'raise', '_Error', '(', "'no previous distribution parameters stored, check options importance_mixing'", ')', 'else', ':', 'sigma', '=', 'self', '.', 'sigma', 'Crootinv', '=', 'self', '.', '_Crootinv', 'xmean', '=', 'self', '.', 'mean', 'D', '=', 'self', '.', 'D', 'dx', '=', 'array', '(', 'x', ')', '-', 'xmean', '# array(x) - array(m)', 'n', '=', 'self', '.', 'N', 'logs2pi', '=', 'n', '*', 'log', '(', '2', '*', 'np', '.', 'pi', ')', '/', '2.', 'logdetC', '=', '2', '*', 'sum', '(', 'log', '(', 'D', ')', ')', 'dx', '=', 'np', '.', 'dot', '(', 'Crootinv', ',', 'dx', ')', 'res', '=', '-', 'sum', '(', 'dx', '**', '2', ')', '/', 'sigma', '**', '2', '/', '2', '-', 'logs2pi', '-', 'logdetC', '/', '2', '-', 'n', '*', 'log', '(', 'sigma', ')', 'if', '1', '<', '3', ':', '# testing', 's2pi', '=', '(', '2', '*', 'np', '.', 'pi', ')', '**', '(', 'n', '/', '2.', ')', 'detC', '=', 'np', '.', 'prod', '(', 'D', ')', '**', '2', 'res2', '=', '-', 'sum', '(', 'dx', '**', '2', ')', '/', 'sigma', '**', '2', '/', '2', '-', 'log', '(', 's2pi', '*', 'abs', '(', 'detC', ')', '**', '0.5', '*', 'sigma', '**', 'n', ')', 'assert', 'res2', '<', 'res', '+', '1e-8', 'or', 'res2', '>', 'res', '-', '1e-8', 'return', 'res']
return log-likelihood of `x` regarding the current sample distribution
['return', 'log', '-', 'likelihood', 'of', 'x', 'regarding', 'the', 'current', 'sample', 'distribution']
train
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L7505-L7537
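For reference, the quantity loglikelihood computes is the ordinary multivariate-normal log-density; a dense-covariance sketch using numpy (the method above derives the same value from sigma and the eigenvalues D rather than a dense matrix):

import numpy as np

def gaussian_loglikelihood(x, mean, C):
    # log N(x | mean, C) = -0.5 * (n*log(2*pi) + log|C| + dx' C^-1 dx)
    dx = np.asarray(x, dtype=float) - np.asarray(mean, dtype=float)
    n = dx.size
    _, logdet = np.linalg.slogdet(C)
    maha = dx @ np.linalg.solve(C, dx)
    return -0.5 * (n * np.log(2 * np.pi) + logdet + maha)

print(gaussian_loglikelihood([0.5, -0.2], [0.0, 0.0], np.eye(2)))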
1,024
mikedh/trimesh
trimesh/util.py
encoded_to_array
def encoded_to_array(encoded): """ Turn a dictionary with base64 encoded strings back into a numpy array. Parameters ------------ encoded : dict Has keys: dtype: string of dtype shape: int tuple of shape base64: base64 encoded string of flat array binary: decode result coming from numpy.tostring Returns ---------- array: numpy array """ if not isinstance(encoded, dict): if is_sequence(encoded): as_array = np.asanyarray(encoded) return as_array else: raise ValueError('Unable to extract numpy array from input') encoded = decode_keys(encoded) dtype = np.dtype(encoded['dtype']) if 'base64' in encoded: array = np.frombuffer(base64.b64decode(encoded['base64']), dtype) elif 'binary' in encoded: array = np.frombuffer(encoded['binary'], dtype=dtype) if 'shape' in encoded: array = array.reshape(encoded['shape']) return array
python
def encoded_to_array(encoded): """ Turn a dictionary with base64 encoded strings back into a numpy array. Parameters ------------ encoded : dict Has keys: dtype: string of dtype shape: int tuple of shape base64: base64 encoded string of flat array binary: decode result coming from numpy.tostring Returns ---------- array: numpy array """ if not isinstance(encoded, dict): if is_sequence(encoded): as_array = np.asanyarray(encoded) return as_array else: raise ValueError('Unable to extract numpy array from input') encoded = decode_keys(encoded) dtype = np.dtype(encoded['dtype']) if 'base64' in encoded: array = np.frombuffer(base64.b64decode(encoded['base64']), dtype) elif 'binary' in encoded: array = np.frombuffer(encoded['binary'], dtype=dtype) if 'shape' in encoded: array = array.reshape(encoded['shape']) return array
['def', 'encoded_to_array', '(', 'encoded', ')', ':', 'if', 'not', 'isinstance', '(', 'encoded', ',', 'dict', ')', ':', 'if', 'is_sequence', '(', 'encoded', ')', ':', 'as_array', '=', 'np', '.', 'asanyarray', '(', 'encoded', ')', 'return', 'as_array', 'else', ':', 'raise', 'ValueError', '(', "'Unable to extract numpy array from input'", ')', 'encoded', '=', 'decode_keys', '(', 'encoded', ')', 'dtype', '=', 'np', '.', 'dtype', '(', 'encoded', '[', "'dtype'", ']', ')', 'if', "'base64'", 'in', 'encoded', ':', 'array', '=', 'np', '.', 'frombuffer', '(', 'base64', '.', 'b64decode', '(', 'encoded', '[', "'base64'", ']', ')', ',', 'dtype', ')', 'elif', "'binary'", 'in', 'encoded', ':', 'array', '=', 'np', '.', 'frombuffer', '(', 'encoded', '[', "'binary'", ']', ',', 'dtype', '=', 'dtype', ')', 'if', "'shape'", 'in', 'encoded', ':', 'array', '=', 'array', '.', 'reshape', '(', 'encoded', '[', "'shape'", ']', ')', 'return', 'array']
Turn a dictionary with base64 encoded strings back into a numpy array. Parameters ------------ encoded : dict Has keys: dtype: string of dtype shape: int tuple of shape base64: base64 encoded string of flat array binary: decode result coming from numpy.tostring Returns ---------- array: numpy array
['Turn', 'a', 'dictionary', 'with', 'base64', 'encoded', 'strings', 'back', 'into', 'a', 'numpy', 'array', '.']
train
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/util.py#L1142-L1178
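A round-trip sketch for encoded_to_array: build the dict it expects by hand, then decode it the same way the function does. The encoding side is an assumption inferred from the keys the function reads.

import base64
import numpy as np

original = np.arange(6, dtype=np.float32).reshape(2, 3)
encoded = {
    'dtype': str(original.dtype),                             # 'float32'
    'shape': original.shape,                                  # (2, 3)
    'base64': base64.b64encode(original.tobytes()).decode(),  # flat bytes
}

decoded = np.frombuffer(base64.b64decode(encoded['base64']),
                        np.dtype(encoded['dtype'])).reshape(encoded['shape'])
assert (decoded == original).all()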
1,025
StackStorm/pybind
pybind/slxos/v17s_1_02/__init__.py
brocade_rbridge._set_rbridge_id
def _set_rbridge_id(self, v, load=False): """ Setter method for rbridge_id, mapped from YANG variable /rbridge_id (list) If this variable is read-only (config: false) in the source YANG file, then _set_rbridge_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rbridge_id() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id', extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge', defining_module='brocade-rbridge', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rbridge_id must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id', extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge', defining_module='brocade-rbridge', yang_type='list', is_config=True)""", }) self.__rbridge_id = t if hasattr(self, '_set'): self._set()
python
def _set_rbridge_id(self, v, load=False): """ Setter method for rbridge_id, mapped from YANG variable /rbridge_id (list) If this variable is read-only (config: false) in the source YANG file, then _set_rbridge_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rbridge_id() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id', extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge', defining_module='brocade-rbridge', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rbridge_id must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id', extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge', defining_module='brocade-rbridge', yang_type='list', is_config=True)""", }) self.__rbridge_id = t if hasattr(self, '_set'): self._set()
['def', '_set_rbridge_id', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"rbridge_id"', ',', 'rbridge_id', '.', 'rbridge_id', ',', 'yang_name', '=', '"rbridge-id"', ',', 'rest_name', '=', '"rbridge-id"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'rbridge-id'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Rbridge Id for Node Specific configuration'", ',', "u'callpoint'", ':', "u'vcsnodespecificcallpoint'", ',', "u'sort-priority'", ':', "u'RUNNCFG_LEVEL_RBRIDGE'", ',', "u'cli-suppress-no'", ':', 'None', ',', "u'display-when'", ':', 'u\'/vcsmode/vcs-mode = "true"\'', '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"rbridge-id"', ',', 'rest_name', '=', '"rbridge-id"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'info'", ':', "u'Rbridge Id for Node Specific configuration'", ',', "u'callpoint'", ':', "u'vcsnodespecificcallpoint'", ',', "u'sort-priority'", ':', "u'RUNNCFG_LEVEL_RBRIDGE'", ',', "u'cli-suppress-no'", ':', 'None', ',', "u'display-when'", ':', 'u\'/vcsmode/vcs-mode = "true"\'', '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-rbridge'", ',', 'defining_module', '=', "'brocade-rbridge'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""rbridge_id must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'rbridge-id\', extensions={u\'tailf-common\': {u\'info\': u\'Rbridge Id for Node Specific configuration\', u\'callpoint\': u\'vcsnodespecificcallpoint\', u\'sort-priority\': u\'RUNNCFG_LEVEL_RBRIDGE\', u\'cli-suppress-no\': None, u\'display-when\': u\'/vcsmode/vcs-mode = "true"\'}}), is_container=\'list\', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Rbridge Id for Node Specific configuration\', u\'callpoint\': u\'vcsnodespecificcallpoint\', u\'sort-priority\': u\'RUNNCFG_LEVEL_RBRIDGE\', u\'cli-suppress-no\': None, u\'display-when\': u\'/vcsmode/vcs-mode = "true"\'}}, namespace=\'urn:brocade.com:mgmt:brocade-rbridge\', defining_module=\'brocade-rbridge\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__rbridge_id', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for rbridge_id, mapped from YANG variable /rbridge_id (list) If this variable is read-only (config: false) in the source YANG file, then _set_rbridge_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rbridge_id() directly.
['Setter', 'method', 'for', 'rbridge_id', 'mapped', 'from', 'YANG', 'variable', '/', 'rbridge_id', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_rbridge_id', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_rbridge_id', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/__init__.py#L10820-L10841
1,026
pyroscope/pyrocore
src/pyrocore/torrent/engine.py
TorrentView.items
def items(self): """ Get list of download items. """ if self.matcher: for item in self._fetch_items(): if self.matcher.match(item): yield item else: for item in self._fetch_items(): yield item
python
def items(self): """ Get list of download items. """ if self.matcher: for item in self._fetch_items(): if self.matcher.match(item): yield item else: for item in self._fetch_items(): yield item
['def', 'items', '(', 'self', ')', ':', 'if', 'self', '.', 'matcher', ':', 'for', 'item', 'in', 'self', '.', '_fetch_items', '(', ')', ':', 'if', 'self', '.', 'matcher', '.', 'match', '(', 'item', ')', ':', 'yield', 'item', 'else', ':', 'for', 'item', 'in', 'self', '.', '_fetch_items', '(', ')', ':', 'yield', 'item']
Get list of download items.
['Get', 'list', 'of', 'download', 'items', '.']
train
https://github.com/pyroscope/pyrocore/blob/89ad01346a570943d20311a0b488440975876612/src/pyrocore/torrent/engine.py#L640-L649
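items above is a plain filtering generator; a toy sketch with a hypothetical matcher exposing the match(item) predicate the code relies on:

class StartsWith:
    def __init__(self, prefix):
        self.prefix = prefix

    def match(self, item):
        return item.startswith(self.prefix)

def filtered(items, matcher=None):
    # Same shape as TorrentView.items: yield lazily, filtering only
    # when a matcher is present.
    for item in items:
        if matcher is None or matcher.match(item):
            yield item

names = ['ubuntu.iso', 'debian.iso', 'notes.txt']
print(list(filtered(names, StartsWith('ubuntu'))))  # ['ubuntu.iso']
print(list(filtered(names)))                        # all three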
1,027
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels_ext.py
brocade_tunnels_ext.get_tunnel_info_output_tunnel_dest_ip
def get_tunnel_info_output_tunnel_dest_ip(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_info = ET.Element("get_tunnel_info") config = get_tunnel_info output = ET.SubElement(get_tunnel_info, "output") tunnel = ET.SubElement(output, "tunnel") dest_ip = ET.SubElement(tunnel, "dest-ip") dest_ip.text = kwargs.pop('dest_ip') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_tunnel_info_output_tunnel_dest_ip(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_info = ET.Element("get_tunnel_info") config = get_tunnel_info output = ET.SubElement(get_tunnel_info, "output") tunnel = ET.SubElement(output, "tunnel") dest_ip = ET.SubElement(tunnel, "dest-ip") dest_ip.text = kwargs.pop('dest_ip') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_tunnel_info_output_tunnel_dest_ip', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_tunnel_info', '=', 'ET', '.', 'Element', '(', '"get_tunnel_info"', ')', 'config', '=', 'get_tunnel_info', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_tunnel_info', ',', '"output"', ')', 'tunnel', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"tunnel"', ')', 'dest_ip', '=', 'ET', '.', 'SubElement', '(', 'tunnel', ',', '"dest-ip"', ')', 'dest_ip', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'dest_ip'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_tunnels_ext.py#L217-L229
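The payload the generated method builds can be reproduced with the standard library alone; note that the initial "config" element is discarded when config is rebound to get_tunnel_info. The dest-ip value below is an arbitrary example.

import xml.etree.ElementTree as ET

get_tunnel_info = ET.Element("get_tunnel_info")
output = ET.SubElement(get_tunnel_info, "output")
tunnel = ET.SubElement(output, "tunnel")
dest_ip = ET.SubElement(tunnel, "dest-ip")
dest_ip.text = "10.0.0.1"
print(ET.tostring(get_tunnel_info).decode())
# <get_tunnel_info><output><tunnel><dest-ip>10.0.0.1</dest-ip></tunnel></output></get_tunnel_info>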
1,028
openstack/hacking
hacking/checks/comments.py
hacking_has_only_comments
def hacking_has_only_comments(physical_line, filename, lines, line_number): """Check for empty files with only comments H104 empty file with only comments """ if line_number == 1 and all(map(EMPTY_LINE_RE.match, lines)): return (0, "H104: File contains nothing but comments")
python
def hacking_has_only_comments(physical_line, filename, lines, line_number): """Check for empty files with only comments H104 empty file with only comments """ if line_number == 1 and all(map(EMPTY_LINE_RE.match, lines)): return (0, "H104: File contains nothing but comments")
['def', 'hacking_has_only_comments', '(', 'physical_line', ',', 'filename', ',', 'lines', ',', 'line_number', ')', ':', 'if', 'line_number', '==', '1', 'and', 'all', '(', 'map', '(', 'EMPTY_LINE_RE', '.', 'match', ',', 'lines', ')', ')', ':', 'return', '(', '0', ',', '"H104: File contains nothing but comments"', ')']
Check for empty files with only comments H104 empty file with only comments
['Check', 'for', 'empty', 'files', 'with', 'only', 'comments']
train
https://github.com/openstack/hacking/blob/10e58f907181cac91d3b2af422c2458b04a1ec79/hacking/checks/comments.py#L100-L106
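A self-contained sketch of the H104 check; the real EMPTY_LINE_RE is defined elsewhere in hacking, so the pattern below (blank line or comment-only line) is an assumption for the demo.

import re

EMPTY_LINE_RE = re.compile(r"^\s*(#.*)?$")

def has_only_comments(lines):
    # H104 fires when every physical line is blank or a comment.
    return all(EMPTY_LINE_RE.match(line) for line in lines)

print(has_only_comments(["# placeholder module\n", "\n"]))  # True
print(has_only_comments(["# header\n", "x = 1\n"]))         # False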
1,029
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
format
def format(format_string, cast=lambda x: x): """ A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that it can apply to each value in the column as it is rendered. This can be useful for string padding like leading zeroes, or rounding floating point numbers to a certain number of decimal places, etc. If given, the ``cast`` argument should be a mapping function that coerces the input to whatever type is required for the string formatting to work. Trying to push string data into a float format will raise an exception, for example, so the ``float`` type itself could be given as the ``cast`` function. Examples:: # Perform some 0 padding item_number = columns.FloatColumn("Item No.", sources=['number'], processor=format("{:03d}")) # Force a string column value to coerce to float and round to 2 decimal places rating = columns.TextColumn("Rating", sources=['avg_rating'], processor=format("{:.2f}", cast=float)) """ def helper(instance, *args, **kwargs): value = kwargs.get('default_value') if value is None: value = instance value = cast(value) return format_string.format(value, obj=instance) return helper
python
def format(format_string, cast=lambda x: x): """ A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that it can apply to each value in the column as it is rendered. This can be useful for string padding like leading zeroes, or rounding floating point numbers to a certain number of decimal places, etc. If given, the ``cast`` argument should be a mapping function that coerces the input to whatever type is required for the string formatting to work. Trying to push string data into a float format will raise an exception, for example, so the ``float`` type itself could be given as the ``cast`` function. Examples:: # Perform some 0 padding item_number = columns.FloatColumn("Item No.", sources=['number'], processor=format("{:03d}")) # Force a string column value to coerce to float and round to 2 decimal places rating = columns.TextColumn("Rating", sources=['avg_rating'], processor=format("{:.2f}", cast=float)) """ def helper(instance, *args, **kwargs): value = kwargs.get('default_value') if value is None: value = instance value = cast(value) return format_string.format(value, obj=instance) return helper
['def', 'format', '(', 'format_string', ',', 'cast', '=', 'lambda', 'x', ':', 'x', ')', ':', 'def', 'helper', '(', 'instance', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'value', '=', 'kwargs', '.', 'get', '(', "'default_value'", ')', 'if', 'value', 'is', 'None', ':', 'value', '=', 'instance', 'value', '=', 'cast', '(', 'value', ')', 'return', 'format_string', '.', 'format', '(', 'value', ',', 'obj', '=', 'instance', ')', 'return', 'helper']
A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that it can apply to each value in the column as it is rendered. This can be useful for string padding like leading zeroes, or rounding floating point numbers to a certain number of decimal places, etc. If given, the ``cast`` argument should be a mapping function that coerces the input to whatever type is required for the string formatting to work. Trying to push string data into a float format will raise an exception, for example, so the ``float`` type itself could be given as the ``cast`` function. Examples:: # Perform some 0 padding item_number = columns.FloatColumn("Item No.", sources=['number'], processor=format("{:03d}")) # Force a string column value to coerce to float and round to 2 decimal places rating = columns.TextColumn("Rating", sources=['avg_rating'], processor=format("{:.2f}", cast=float))
['A', 'pre', '-', 'called', 'helper', 'to', 'supply', 'a', 'modern', 'string', 'format', '(', 'the', 'kind', 'with', '{}', 'instead', 'of', '%s', ')', 'so', 'that', 'it', 'can', 'apply', 'to', 'each', 'value', 'in', 'the', 'column', 'as', 'it', 'is', 'rendered', '.', 'This', 'can', 'be', 'useful', 'for', 'string', 'padding', 'like', 'leading', 'zeroes', 'or', 'rounding', 'floating', 'point', 'numbers', 'to', 'a', 'certain', 'number', 'of', 'decimal', 'places', 'etc', '.']
train
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L256-L286
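The column classes above are Django-specific, so a sketch exercising just the closure itself may be useful; the calls below are illustrative, not taken from the library's test suite:

def format(format_string, cast=lambda x: x):
    # Returns a processor: called per row, it casts the value then formats it.
    def helper(instance, *args, **kwargs):
        value = kwargs.get('default_value')
        if value is None:
            value = instance
        value = cast(value)
        return format_string.format(value, obj=instance)
    return helper

pad = format("{:03d}")                  # zero-padding for integers
rating = format("{:.2f}", cast=float)   # coerce strings before formatting

print(pad(7))          # 007
print(rating("4.25"))  # 4.25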
1,030
MaxHalford/prince
prince/pca.py
PCA.plot_row_coordinates
def plot_row_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, labels=None, color_labels=None, ellipse_outline=False, ellipse_fill=True, show_points=True, **kwargs): """Plot the row principal coordinates.""" utils.validation.check_is_fitted(self, 's_') if ax is None: fig, ax = plt.subplots(figsize=figsize) # Add style ax = plot.stylize_axis(ax) # Make sure X is a DataFrame if not isinstance(X, pd.DataFrame): X = pd.DataFrame(X) # Retrieve principal coordinates coordinates = self.row_coordinates(X) x = coordinates[x_component].astype(np.float) y = coordinates[y_component].astype(np.float) # Plot if color_labels is None: ax.scatter(x, y, **kwargs) else: for color_label in sorted(list(set(color_labels))): mask = np.array(color_labels) == color_label color = ax._get_lines.get_next_color() # Plot points if show_points: ax.scatter(x[mask], y[mask], color=color, **kwargs, label=color_label) # Plot ellipse if (ellipse_outline or ellipse_fill): x_mean, y_mean, width, height, angle = plot.build_ellipse(x[mask], y[mask]) ax.add_patch(mpl.patches.Ellipse( (x_mean, y_mean), width, height, angle=angle, linewidth=2 if ellipse_outline else 0, color=color, fill=ellipse_fill, alpha=0.2 + (0.3 if not show_points else 0) if ellipse_fill else 1 )) # Add labels if labels is not None: for i, label in enumerate(labels): ax.annotate(label, (x[i], y[i])) # Legend ax.legend() # Text ax.set_title('Row principal coordinates') ei = self.explained_inertia_ ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component])) ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component])) return ax
python
def plot_row_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, labels=None, color_labels=None, ellipse_outline=False, ellipse_fill=True, show_points=True, **kwargs): """Plot the row principal coordinates.""" utils.validation.check_is_fitted(self, 's_') if ax is None: fig, ax = plt.subplots(figsize=figsize) # Add style ax = plot.stylize_axis(ax) # Make sure X is a DataFrame if not isinstance(X, pd.DataFrame): X = pd.DataFrame(X) # Retrieve principal coordinates coordinates = self.row_coordinates(X) x = coordinates[x_component].astype(np.float) y = coordinates[y_component].astype(np.float) # Plot if color_labels is None: ax.scatter(x, y, **kwargs) else: for color_label in sorted(list(set(color_labels))): mask = np.array(color_labels) == color_label color = ax._get_lines.get_next_color() # Plot points if show_points: ax.scatter(x[mask], y[mask], color=color, **kwargs, label=color_label) # Plot ellipse if (ellipse_outline or ellipse_fill): x_mean, y_mean, width, height, angle = plot.build_ellipse(x[mask], y[mask]) ax.add_patch(mpl.patches.Ellipse( (x_mean, y_mean), width, height, angle=angle, linewidth=2 if ellipse_outline else 0, color=color, fill=ellipse_fill, alpha=0.2 + (0.3 if not show_points else 0) if ellipse_fill else 1 )) # Add labels if labels is not None: for i, label in enumerate(labels): ax.annotate(label, (x[i], y[i])) # Legend ax.legend() # Text ax.set_title('Row principal coordinates') ei = self.explained_inertia_ ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component])) ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component])) return ax
['def', 'plot_row_coordinates', '(', 'self', ',', 'X', ',', 'ax', '=', 'None', ',', 'figsize', '=', '(', '6', ',', '6', ')', ',', 'x_component', '=', '0', ',', 'y_component', '=', '1', ',', 'labels', '=', 'None', ',', 'color_labels', '=', 'None', ',', 'ellipse_outline', '=', 'False', ',', 'ellipse_fill', '=', 'True', ',', 'show_points', '=', 'True', ',', '*', '*', 'kwargs', ')', ':', 'utils', '.', 'validation', '.', 'check_is_fitted', '(', 'self', ',', "'s_'", ')', 'if', 'ax', 'is', 'None', ':', 'fig', ',', 'ax', '=', 'plt', '.', 'subplots', '(', 'figsize', '=', 'figsize', ')', '# Add style', 'ax', '=', 'plot', '.', 'stylize_axis', '(', 'ax', ')', '# Make sure X is a DataFrame', 'if', 'not', 'isinstance', '(', 'X', ',', 'pd', '.', 'DataFrame', ')', ':', 'X', '=', 'pd', '.', 'DataFrame', '(', 'X', ')', '# Retrieve principal coordinates', 'coordinates', '=', 'self', '.', 'row_coordinates', '(', 'X', ')', 'x', '=', 'coordinates', '[', 'x_component', ']', '.', 'astype', '(', 'np', '.', 'float', ')', 'y', '=', 'coordinates', '[', 'y_component', ']', '.', 'astype', '(', 'np', '.', 'float', ')', '# Plot', 'if', 'color_labels', 'is', 'None', ':', 'ax', '.', 'scatter', '(', 'x', ',', 'y', ',', '*', '*', 'kwargs', ')', 'else', ':', 'for', 'color_label', 'in', 'sorted', '(', 'list', '(', 'set', '(', 'color_labels', ')', ')', ')', ':', 'mask', '=', 'np', '.', 'array', '(', 'color_labels', ')', '==', 'color_label', 'color', '=', 'ax', '.', '_get_lines', '.', 'get_next_color', '(', ')', '# Plot points', 'if', 'show_points', ':', 'ax', '.', 'scatter', '(', 'x', '[', 'mask', ']', ',', 'y', '[', 'mask', ']', ',', 'color', '=', 'color', ',', '*', '*', 'kwargs', ',', 'label', '=', 'color_label', ')', '# Plot ellipse', 'if', '(', 'ellipse_outline', 'or', 'ellipse_fill', ')', ':', 'x_mean', ',', 'y_mean', ',', 'width', ',', 'height', ',', 'angle', '=', 'plot', '.', 'build_ellipse', '(', 'x', '[', 'mask', ']', ',', 'y', '[', 'mask', ']', ')', 'ax', '.', 'add_patch', '(', 'mpl', '.', 'patches', '.', 'Ellipse', '(', '(', 'x_mean', ',', 'y_mean', ')', ',', 'width', ',', 'height', ',', 'angle', '=', 'angle', ',', 'linewidth', '=', '2', 'if', 'ellipse_outline', 'else', '0', ',', 'color', '=', 'color', ',', 'fill', '=', 'ellipse_fill', ',', 'alpha', '=', '0.2', '+', '(', '0.3', 'if', 'not', 'show_points', 'else', '0', ')', 'if', 'ellipse_fill', 'else', '1', ')', ')', '# Add labels', 'if', 'labels', 'is', 'not', 'None', ':', 'for', 'i', ',', 'label', 'in', 'enumerate', '(', 'labels', ')', ':', 'ax', '.', 'annotate', '(', 'label', ',', '(', 'x', '[', 'i', ']', ',', 'y', '[', 'i', ']', ')', ')', '# Legend', 'ax', '.', 'legend', '(', ')', '# Text', 'ax', '.', 'set_title', '(', "'Row principal coordinates'", ')', 'ei', '=', 'self', '.', 'explained_inertia_', 'ax', '.', 'set_xlabel', '(', "'Component {} ({:.2f}% inertia)'", '.', 'format', '(', 'x_component', ',', '100', '*', 'ei', '[', 'x_component', ']', ')', ')', 'ax', '.', 'set_ylabel', '(', "'Component {} ({:.2f}% inertia)'", '.', 'format', '(', 'y_component', ',', '100', '*', 'ei', '[', 'y_component', ']', ')', ')', 'return', 'ax']
Plot the row principal coordinates.
['Plot', 'the', 'row', 'principal', 'coordinates', '.']
train
https://github.com/MaxHalford/prince/blob/714c9cdfc4d9f8823eabf550a23ad01fe87c50d7/prince/pca.py#L169-L228
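Note that coordinates[...].astype(np.float) relies on the np.float alias, which NumPy removed in 1.24, so the method as recorded assumes an older NumPy. A hypothetical driver, with made-up data and group labels (assumes prince and matplotlib are installed):

import pandas as pd
import prince

X = pd.DataFrame({'a': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                  'b': [6.0, 5.0, 4.0, 3.0, 2.0, 1.0],
                  'c': [1.0, 3.0, 2.0, 5.0, 4.0, 6.0]})
pca = prince.PCA(n_components=2).fit(X)
ax = pca.plot_row_coordinates(X, color_labels=['g1'] * 3 + ['g2'] * 3,
                              ellipse_fill=False)
ax.figure.savefig('row_coordinates.png')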
1,031
apache/spark
python/pyspark/sql/readwriter.py
DataFrameReader.text
def text(self, paths, wholetext=False, lineSep=None): """ Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')] """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(paths, basestring): paths = [paths] return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
python
def text(self, paths, wholetext=False, lineSep=None): """ Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')] """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(paths, basestring): paths = [paths] return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
['def', 'text', '(', 'self', ',', 'paths', ',', 'wholetext', '=', 'False', ',', 'lineSep', '=', 'None', ')', ':', 'self', '.', '_set_opts', '(', 'wholetext', '=', 'wholetext', ',', 'lineSep', '=', 'lineSep', ')', 'if', 'isinstance', '(', 'paths', ',', 'basestring', ')', ':', 'paths', '=', '[', 'paths', ']', 'return', 'self', '.', '_df', '(', 'self', '.', '_jreader', '.', 'text', '(', 'self', '.', '_spark', '.', '_sc', '.', '_jvm', '.', 'PythonUtils', '.', 'toSeq', '(', 'paths', ')', ')', ')']
Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')]
['Loads', 'text', 'files', 'and', 'returns', 'a', ':', 'class', ':', 'DataFrame', 'whose', 'schema', 'starts', 'with', 'a', 'string', 'column', 'named', 'value', 'and', 'followed', 'by', 'partitioned', 'columns', 'if', 'there', 'are', 'any', '.', 'The', 'text', 'files', 'must', 'be', 'encoded', 'as', 'UTF', '-', '8', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/readwriter.py#L325-L349
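Beyond the doctests above, a short sketch of the option paths; it assumes a live SparkSession bound to spark and uses hypothetical file paths:

df = spark.read.text('/data/logs.txt')                     # one row per line
whole = spark.read.text('/data/logs.txt', wholetext=True)  # one row per file
piped = spark.read.text('/data/records.txt', lineSep='|')  # custom separator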
1,032
allenai/allennlp
allennlp/state_machines/transition_functions/basic_transition_function.py
BasicTransitionFunction.attend_on_question
def attend_on_question(self, query: torch.Tensor, encoder_outputs: torch.Tensor, encoder_output_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Given a query (which is typically the decoder hidden state), compute an attention over the output of the question encoder, and return a weighted sum of the question representations given this attention. We also return the attention weights themselves. This is a simple computation, but we have it as a separate method so that the ``forward`` method on the main parser module can call it on the initial hidden state, to simplify the logic in ``take_step``. """ # (group_size, question_length) question_attention_weights = self._input_attention(query, encoder_outputs, encoder_output_mask) # (group_size, encoder_output_dim) attended_question = util.weighted_sum(encoder_outputs, question_attention_weights) return attended_question, question_attention_weights
python
def attend_on_question(self, query: torch.Tensor, encoder_outputs: torch.Tensor, encoder_output_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Given a query (which is typically the decoder hidden state), compute an attention over the output of the question encoder, and return a weighted sum of the question representations given this attention. We also return the attention weights themselves. This is a simple computation, but we have it as a separate method so that the ``forward`` method on the main parser module can call it on the initial hidden state, to simplify the logic in ``take_step``. """ # (group_size, question_length) question_attention_weights = self._input_attention(query, encoder_outputs, encoder_output_mask) # (group_size, encoder_output_dim) attended_question = util.weighted_sum(encoder_outputs, question_attention_weights) return attended_question, question_attention_weights
['def', 'attend_on_question', '(', 'self', ',', 'query', ':', 'torch', '.', 'Tensor', ',', 'encoder_outputs', ':', 'torch', '.', 'Tensor', ',', 'encoder_output_mask', ':', 'torch', '.', 'Tensor', ')', '->', 'Tuple', '[', 'torch', '.', 'Tensor', ',', 'torch', '.', 'Tensor', ']', ':', '# (group_size, question_length)', 'question_attention_weights', '=', 'self', '.', '_input_attention', '(', 'query', ',', 'encoder_outputs', ',', 'encoder_output_mask', ')', '# (group_size, encoder_output_dim)', 'attended_question', '=', 'util', '.', 'weighted_sum', '(', 'encoder_outputs', ',', 'question_attention_weights', ')', 'return', 'attended_question', ',', 'question_attention_weights']
Given a query (which is typically the decoder hidden state), compute an attention over the output of the question encoder, and return a weighted sum of the question representations given this attention. We also return the attention weights themselves. This is a simple computation, but we have it as a separate method so that the ``forward`` method on the main parser module can call it on the initial hidden state, to simplify the logic in ``take_step``.
['Given', 'a', 'query', '(', 'which', 'is', 'typically', 'the', 'decoder', 'hidden', 'state', ')', 'compute', 'an', 'attention', 'over', 'the', 'output', 'of', 'the', 'question', 'encoder', 'and', 'return', 'a', 'weighted', 'sum', 'of', 'the', 'question', 'representations', 'given', 'this', 'attention', '.', 'We', 'also', 'return', 'the', 'attention', 'weights', 'themselves', '.']
train
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/state_machines/transition_functions/basic_transition_function.py#L393-L412
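The record depends on AllenNLP's configured Attention module and util.weighted_sum; a plain-PyTorch sketch of the same two steps follows, where the dot-product scoring is an assumption (the configured attention function could differ):

import torch
import torch.nn.functional as F

group_size, question_length, dim = 2, 5, 8
query = torch.randn(group_size, dim)
encoder_outputs = torch.randn(group_size, question_length, dim)
encoder_output_mask = torch.ones(group_size, question_length)

# (group_size, question_length): dot-product scores, then masked softmax
scores = torch.bmm(encoder_outputs, query.unsqueeze(-1)).squeeze(-1)
scores = scores.masked_fill(encoder_output_mask == 0, float('-inf'))
question_attention_weights = F.softmax(scores, dim=-1)

# (group_size, encoder_output_dim): weighted sum of question encodings
attended_question = torch.bmm(question_attention_weights.unsqueeze(1),
                              encoder_outputs).squeeze(1)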
1,033
saltstack/salt
salt/modules/at.py
at
def at(*args, **kwargs): # pylint: disable=C0103 ''' Add a job to the queue. The 'timespec' follows the format documented in the at(1) manpage. CLI Example: .. code-block:: bash salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>] salt '*' at.at 12:05am '/sbin/reboot' tag=reboot salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim ''' if len(args) < 2: return {'jobs': []} # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() binary = salt.utils.path.which('at') if not binary: return '\'at.at\' is not available.' if 'tag' in kwargs: stdin = '### SALT: {0}\n{1}'.format(kwargs['tag'], ' '.join(args[1:])) else: stdin = ' '.join(args[1:]) cmd = [binary, args[0]] cmd_kwargs = {'stdin': stdin, 'python_shell': False} if 'runas' in kwargs: cmd_kwargs['runas'] = kwargs['runas'] output = __salt__['cmd.run'](cmd, **cmd_kwargs) if output is None: return '\'at.at\' is not available.' if output.endswith('Garbled time'): return {'jobs': [], 'error': 'invalid timespec'} if output.startswith('warning: commands'): output = output.splitlines()[1] if output.startswith('commands will be executed'): output = output.splitlines()[1] output = output.split()[1] if __grains__['os'] in BSD: return atq(six.text_type(output)) else: return atq(int(output))
python
def at(*args, **kwargs): # pylint: disable=C0103 ''' Add a job to the queue. The 'timespec' follows the format documented in the at(1) manpage. CLI Example: .. code-block:: bash salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>] salt '*' at.at 12:05am '/sbin/reboot' tag=reboot salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim ''' if len(args) < 2: return {'jobs': []} # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() binary = salt.utils.path.which('at') if not binary: return '\'at.at\' is not available.' if 'tag' in kwargs: stdin = '### SALT: {0}\n{1}'.format(kwargs['tag'], ' '.join(args[1:])) else: stdin = ' '.join(args[1:]) cmd = [binary, args[0]] cmd_kwargs = {'stdin': stdin, 'python_shell': False} if 'runas' in kwargs: cmd_kwargs['runas'] = kwargs['runas'] output = __salt__['cmd.run'](cmd, **cmd_kwargs) if output is None: return '\'at.at\' is not available.' if output.endswith('Garbled time'): return {'jobs': [], 'error': 'invalid timespec'} if output.startswith('warning: commands'): output = output.splitlines()[1] if output.startswith('commands will be executed'): output = output.splitlines()[1] output = output.split()[1] if __grains__['os'] in BSD: return atq(six.text_type(output)) else: return atq(int(output))
['def', 'at', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '# pylint: disable=C0103', 'if', 'len', '(', 'args', ')', '<', '2', ':', 'return', '{', "'jobs'", ':', '[', ']', '}', '# Shim to produce output similar to what __virtual__() should do', "# but __salt__ isn't available in __virtual__()", 'binary', '=', 'salt', '.', 'utils', '.', 'path', '.', 'which', '(', "'at'", ')', 'if', 'not', 'binary', ':', 'return', "'\\'at.at\\' is not available.'", 'if', "'tag'", 'in', 'kwargs', ':', 'stdin', '=', "'### SALT: {0}\\n{1}'", '.', 'format', '(', 'kwargs', '[', "'tag'", ']', ',', "' '", '.', 'join', '(', 'args', '[', '1', ':', ']', ')', ')', 'else', ':', 'stdin', '=', "' '", '.', 'join', '(', 'args', '[', '1', ':', ']', ')', 'cmd', '=', '[', 'binary', ',', 'args', '[', '0', ']', ']', 'cmd_kwargs', '=', '{', "'stdin'", ':', 'stdin', ',', "'python_shell'", ':', 'False', '}', 'if', "'runas'", 'in', 'kwargs', ':', 'cmd_kwargs', '[', "'runas'", ']', '=', 'kwargs', '[', "'runas'", ']', 'output', '=', '__salt__', '[', "'cmd.run'", ']', '(', 'cmd', ',', '*', '*', 'cmd_kwargs', ')', 'if', 'output', 'is', 'None', ':', 'return', "'\\'at.at\\' is not available.'", 'if', 'output', '.', 'endswith', '(', "'Garbled time'", ')', ':', 'return', '{', "'jobs'", ':', '[', ']', ',', "'error'", ':', "'invalid timespec'", '}', 'if', 'output', '.', 'startswith', '(', "'warning: commands'", ')', ':', 'output', '=', 'output', '.', 'splitlines', '(', ')', '[', '1', ']', 'if', 'output', '.', 'startswith', '(', "'commands will be executed'", ')', ':', 'output', '=', 'output', '.', 'splitlines', '(', ')', '[', '1', ']', 'output', '=', 'output', '.', 'split', '(', ')', '[', '1', ']', 'if', '__grains__', '[', "'os'", ']', 'in', 'BSD', ':', 'return', 'atq', '(', 'six', '.', 'text_type', '(', 'output', ')', ')', 'else', ':', 'return', 'atq', '(', 'int', '(', 'output', ')', ')']
Add a job to the queue. The 'timespec' follows the format documented in the at(1) manpage. CLI Example: .. code-block:: bash salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>] salt '*' at.at 12:05am '/sbin/reboot' tag=reboot salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim
['Add', 'a', 'job', 'to', 'the', 'queue', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/at.py#L210-L263
1,034
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/rmic.py
emit_rmic_classes
def emit_rmic_classes(target, source, env): """Create and return lists of Java RMI stub and skeleton class files to be created from a set of class files. """ class_suffix = env.get('JAVACLASSSUFFIX', '.class') classdir = env.get('JAVACLASSDIR') if not classdir: try: s = source[0] except IndexError: classdir = '.' else: try: classdir = s.attributes.java_classdir except AttributeError: classdir = '.' classdir = env.Dir(classdir).rdir() if str(classdir) == '.': c_ = None else: c_ = str(classdir) + os.sep slist = [] for src in source: try: classname = src.attributes.java_classname except AttributeError: classname = str(src) if c_ and classname[:len(c_)] == c_: classname = classname[len(c_):] if class_suffix and classname[-len(class_suffix):] == class_suffix: classname = classname[:-len(class_suffix)] s = src.rfile() s.attributes.java_classdir = classdir s.attributes.java_classname = classname slist.append(s) stub_suffixes = ['_Stub'] if env.get('JAVAVERSION') == '1.4': stub_suffixes.append('_Skel') tlist = [] for s in source: for suff in stub_suffixes: fname = s.attributes.java_classname.replace('.', os.sep) + \ suff + class_suffix t = target[0].File(fname) t.attributes.java_lookupdir = target[0] tlist.append(t) return tlist, source
python
def emit_rmic_classes(target, source, env): """Create and return lists of Java RMI stub and skeleton class files to be created from a set of class files. """ class_suffix = env.get('JAVACLASSSUFFIX', '.class') classdir = env.get('JAVACLASSDIR') if not classdir: try: s = source[0] except IndexError: classdir = '.' else: try: classdir = s.attributes.java_classdir except AttributeError: classdir = '.' classdir = env.Dir(classdir).rdir() if str(classdir) == '.': c_ = None else: c_ = str(classdir) + os.sep slist = [] for src in source: try: classname = src.attributes.java_classname except AttributeError: classname = str(src) if c_ and classname[:len(c_)] == c_: classname = classname[len(c_):] if class_suffix and classname[-len(class_suffix):] == class_suffix: classname = classname[:-len(class_suffix)] s = src.rfile() s.attributes.java_classdir = classdir s.attributes.java_classname = classname slist.append(s) stub_suffixes = ['_Stub'] if env.get('JAVAVERSION') == '1.4': stub_suffixes.append('_Skel') tlist = [] for s in source: for suff in stub_suffixes: fname = s.attributes.java_classname.replace('.', os.sep) + \ suff + class_suffix t = target[0].File(fname) t.attributes.java_lookupdir = target[0] tlist.append(t) return tlist, source
['def', 'emit_rmic_classes', '(', 'target', ',', 'source', ',', 'env', ')', ':', 'class_suffix', '=', 'env', '.', 'get', '(', "'JAVACLASSSUFFIX'", ',', "'.class'", ')', 'classdir', '=', 'env', '.', 'get', '(', "'JAVACLASSDIR'", ')', 'if', 'not', 'classdir', ':', 'try', ':', 's', '=', 'source', '[', '0', ']', 'except', 'IndexError', ':', 'classdir', '=', "'.'", 'else', ':', 'try', ':', 'classdir', '=', 's', '.', 'attributes', '.', 'java_classdir', 'except', 'AttributeError', ':', 'classdir', '=', "'.'", 'classdir', '=', 'env', '.', 'Dir', '(', 'classdir', ')', '.', 'rdir', '(', ')', 'if', 'str', '(', 'classdir', ')', '==', "'.'", ':', 'c_', '=', 'None', 'else', ':', 'c_', '=', 'str', '(', 'classdir', ')', '+', 'os', '.', 'sep', 'slist', '=', '[', ']', 'for', 'src', 'in', 'source', ':', 'try', ':', 'classname', '=', 'src', '.', 'attributes', '.', 'java_classname', 'except', 'AttributeError', ':', 'classname', '=', 'str', '(', 'src', ')', 'if', 'c_', 'and', 'classname', '[', ':', 'len', '(', 'c_', ')', ']', '==', 'c_', ':', 'classname', '=', 'classname', '[', 'len', '(', 'c_', ')', ':', ']', 'if', 'class_suffix', 'and', 'classname', '[', ':', '-', 'len', '(', 'class_suffix', ')', ']', '==', 'class_suffix', ':', 'classname', '=', 'classname', '[', '-', 'len', '(', 'class_suffix', ')', ':', ']', 's', '=', 'src', '.', 'rfile', '(', ')', 's', '.', 'attributes', '.', 'java_classdir', '=', 'classdir', 's', '.', 'attributes', '.', 'java_classname', '=', 'classname', 'slist', '.', 'append', '(', 's', ')', 'stub_suffixes', '=', '[', "'_Stub'", ']', 'if', 'env', '.', 'get', '(', "'JAVAVERSION'", ')', '==', "'1.4'", ':', 'stub_suffixes', '.', 'append', '(', "'_Skel'", ')', 'tlist', '=', '[', ']', 'for', 's', 'in', 'source', ':', 'for', 'suff', 'in', 'stub_suffixes', ':', 'fname', '=', 's', '.', 'attributes', '.', 'java_classname', '.', 'replace', '(', "'.'", ',', 'os', '.', 'sep', ')', '+', 'suff', '+', 'class_suffix', 't', '=', 'target', '[', '0', ']', '.', 'File', '(', 'fname', ')', 't', '.', 'attributes', '.', 'java_lookupdir', '=', 'target', '[', '0', ']', 'tlist', '.', 'append', '(', 't', ')', 'return', 'tlist', ',', 'source']
Create and return lists of Java RMI stub and skeleton class files to be created from a set of class files.
['Create', 'and', 'return', 'lists', 'of', 'Java', 'RMI', 'stub', 'and', 'skeleton', 'class', 'files', 'to', 'be', 'created', 'from', 'a', 'set', 'of', 'class', 'files', '.']
train
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/rmic.py#L43-L94
1,035
netpieio/microgear-python
microgear/cache.py
set_item
def set_item(key,value): """Write JSON content from value argument to cached file and return""" CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key) open(CACHED_KEY_FILE, "wb").write(json.dumps({"_": value}).encode('UTF-8')) return value
python
def set_item(key,value): """Write JSON content from value argument to cached file and return""" CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key) open(CACHED_KEY_FILE, "wb").write(json.dumps({"_": value}).encode('UTF-8')) return value
['def', 'set_item', '(', 'key', ',', 'value', ')', ':', 'CACHED_KEY_FILE', '=', 'os', '.', 'path', '.', 'join', '(', 'CURRENT_DIR', ',', 'key', ')', 'open', '(', 'CACHED_KEY_FILE', ',', '"wb"', ')', '.', 'write', '(', 'json', '.', 'dumps', '(', '{', '"_"', ':', 'value', '}', ')', '.', 'encode', '(', "'UTF-8'", ')', ')', 'return', 'value']
Write JSON content from value argument to cached file and return
['Write', 'JSON', 'content', 'from', 'value', 'argument', 'to', 'cached', 'file', 'and', 'return']
train
https://github.com/netpieio/microgear-python/blob/ea9bb352c7dd84b92f3462177645eaa4d448d50b/microgear/cache.py#L19-L25
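The bare open(...).write(...) above never closes the file handle explicitly; a variant using a context manager, where the CURRENT_DIR definition is an assumption about the module's setup:

import json
import os

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))  # assumed setup

def set_item(key, value):
    """Write JSON content from value argument to cached file and return"""
    CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key)
    # The with-block closes the handle promptly instead of relying on GC.
    with open(CACHED_KEY_FILE, "wb") as fp:
        fp.write(json.dumps({"_": value}).encode('UTF-8'))
    return value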
1,036
rootpy/rootpy
rootpy/plotting/graph.py
_Graph1DBase.Shift
def Shift(self, value, copy=False): """ Shift the graph left or right by value """ numPoints = self.GetN() if copy: shiftGraph = self.Clone() else: shiftGraph = self X = self.GetX() EXlow = self.GetEXlow() EXhigh = self.GetEXhigh() Y = self.GetY() EYlow = self.GetEYlow() EYhigh = self.GetEYhigh() for i in range(numPoints): shiftGraph.SetPoint(i, X[i] + value, Y[i]) shiftGraph.SetPointError( i, EXlow[i], EXhigh[i], EYlow[i], EYhigh[i]) return shiftGraph
python
def Shift(self, value, copy=False): """ Shift the graph left or right by value """ numPoints = self.GetN() if copy: shiftGraph = self.Clone() else: shiftGraph = self X = self.GetX() EXlow = self.GetEXlow() EXhigh = self.GetEXhigh() Y = self.GetY() EYlow = self.GetEYlow() EYhigh = self.GetEYhigh() for i in range(numPoints): shiftGraph.SetPoint(i, X[i] + value, Y[i]) shiftGraph.SetPointError( i, EXlow[i], EXhigh[i], EYlow[i], EYhigh[i]) return shiftGraph
['def', 'Shift', '(', 'self', ',', 'value', ',', 'copy', '=', 'False', ')', ':', 'numPoints', '=', 'self', '.', 'GetN', '(', ')', 'if', 'copy', ':', 'shiftGraph', '=', 'self', '.', 'Clone', '(', ')', 'else', ':', 'shiftGraph', '=', 'self', 'X', '=', 'self', '.', 'GetX', '(', ')', 'EXlow', '=', 'self', '.', 'GetEXlow', '(', ')', 'EXhigh', '=', 'self', '.', 'GetEXhigh', '(', ')', 'Y', '=', 'self', '.', 'GetY', '(', ')', 'EYlow', '=', 'self', '.', 'GetEYlow', '(', ')', 'EYhigh', '=', 'self', '.', 'GetEYhigh', '(', ')', 'for', 'i', 'in', 'range', '(', 'numPoints', ')', ':', 'shiftGraph', '.', 'SetPoint', '(', 'i', ',', 'X', '[', 'i', ']', '+', 'value', ',', 'Y', '[', 'i', ']', ')', 'shiftGraph', '.', 'SetPointError', '(', 'i', ',', 'EXlow', '[', 'i', ']', ',', 'EXhigh', '[', 'i', ']', ',', 'EYlow', '[', 'i', ']', ',', 'EYhigh', '[', 'i', ']', ')', 'return', 'shiftGraph']
Shift the graph left or right by value
['Shift', 'the', 'graph', 'left', 'or', 'right', 'by', 'value']
train
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/plotting/graph.py#L586-L607
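A hypothetical usage sketch (requires ROOT with rootpy installed; the point values and the positional Graph constructor argument are assumptions):

from rootpy.plotting import Graph

g = Graph(3)
for i, (x, y) in enumerate([(0., 1.), (1., 4.), (2., 9.)]):
    g.SetPoint(i, x, y)

shifted = g.Shift(0.5, copy=True)  # new graph, x values moved right by 0.5
g.Shift(-0.5)                      # modifies g in place and returns it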
1,037
Erotemic/utool
utool/Preferences.py
Pref.toggle
def toggle(self, key): """ Toggles a boolean key """ val = self[key] assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val) self.pref_update(key, not val)
python
def toggle(self, key): """ Toggles a boolean key """ val = self[key] assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val) self.pref_update(key, not val)
['def', 'toggle', '(', 'self', ',', 'key', ')', ':', 'val', '=', 'self', '[', 'key', ']', 'assert', 'isinstance', '(', 'val', ',', 'bool', ')', ',', "'key[%r] = %r is not a bool'", '%', '(', 'key', ',', 'val', ')', 'self', '.', 'pref_update', '(', 'key', ',', 'not', 'val', ')']
Toggles a boolean key
['Toggles', 'a', 'boolean', 'key']
train
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/Preferences.py#L143-L147
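A standalone analogue of the pattern, with a minimal stand-in for the Pref container (the real class carries far more machinery):

class Prefs(dict):
    # Minimal stand-in: pref_update just assigns.
    def pref_update(self, key, value):
        self[key] = value

    def toggle(self, key):
        val = self[key]
        assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val)
        self.pref_update(key, not val)

p = Prefs(show_grid=True)
p.toggle('show_grid')
print(p['show_grid'])  # False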
1,038
rsmuc/health_monitoring_plugins
health_monitoring_plugins/raritan.py
Raritan.check_sensor
def check_sensor(self, helper): """ check the status of the specified sensor """ try: sensor_name, sensor_state, sensor_type = self.sess.get_oids( self.oids['oid_sensor_name'], self.oids['oid_sensor_state'], self.oids['oid_sensor_type']) except health_monitoring_plugins.SnmpException as e: helper.exit(summary=str(e), exit_code=unknown, perfdata='') try: sensor_state_string = states[int(sensor_state)] except KeyError as e: helper.exit(summary="Invalid sensor response " + sensor_state, exit_code=unknown, perfdata='') sensor_unit = "" # if it's a onOff Sensor or something like that, we need an empty string for the summary sensor_unit_string = "" sensor_value = "" sensor_digit = "" real_sensor_value = "" sensor_warning_upper = "" sensor_critical_upper = "" sensor_warning_lower = "" sensor_critical_lower = "" if int(sensor_type) not in [14, 16, 17, 18, 19, 20]: # for all sensors except these, we want to calculate the real value and show the metric. # 14: onOff # 16: vibration # 17: waterDetection # 18: smokeDetection # 19: binary # 20: contact try: sensor_unit, sensor_digit, sensor_warning_upper, sensor_critical_upper, sensor_warning_lower, sensor_critical_lower, sensor_value = self.sess.get_oids( self.oids['oid_sensor_unit'], self.oids['oid_sensor_digit'], self.oids['oid_sensor_warning_upper'], self.oids['oid_sensor_critical_upper'], self.oids['oid_sensor_warning_lower'], self.oids['oid_sensor_critical_lower'], self.oids['oid_sensor_value']) except health_monitoring_plugins.SnmpException as e: helper.exit(summary=str(e), exit_code=unknown, perfdata='') sensor_unit_string = units[int(sensor_unit)] real_sensor_value = real_value(int(sensor_value), sensor_digit) real_sensor_warning_upper = real_value(sensor_warning_upper, sensor_digit) real_sensor_critical_upper = real_value(sensor_critical_upper, sensor_digit) real_sensor_warning_lower = real_value(sensor_warning_lower, sensor_digit) real_sensor_critical_lower = real_value(sensor_critical_lower, sensor_digit) # metrics are only possible for these sensors helper.add_metric(sensor_name + " -%s- " % sensor_unit_string, real_sensor_value, real_sensor_warning_lower +\ ":" + real_sensor_warning_upper, real_sensor_critical_lower +\ ":" + real_sensor_critical_upper, "", "", "") # "OK" state if sensor_state_string in ["closed", "normal", "on", "notDetected", "ok", "yes", "one", "two", "inSync"]: helper.status(ok) # "WARNING" state elif sensor_state_string in ["open", "belowLowerWarning", "aboveUpperWarning", "marginal", "standby"]: helper.status(warning) # "CRITICAL" state elif sensor_state_string in ["belowLowerCritical", "aboveUpperCritical", "off", "detected", "alarmed", "fail", "no", "outOfSync"]: helper.status(critical) # "UNKOWN" state elif sensor_state_string in ["unavailable"]: helper.status(unknown) # received an undefined state else: helper.exit(summary="Something went wrong - received undefined state", exit_code=unknown, perfdata='') # summary is shown for all sensors helper.add_summary("Sensor %s - '%s' %s%s is: %s" % (self.number, sensor_name, real_sensor_value, sensor_unit_string, sensor_state_string))
python
def check_sensor(self, helper): """ check the status of the specified sensor """ try: sensor_name, sensor_state, sensor_type = self.sess.get_oids( self.oids['oid_sensor_name'], self.oids['oid_sensor_state'], self.oids['oid_sensor_type']) except health_monitoring_plugins.SnmpException as e: helper.exit(summary=str(e), exit_code=unknown, perfdata='') try: sensor_state_string = states[int(sensor_state)] except KeyError as e: helper.exit(summary="Invalid sensor response " + sensor_state, exit_code=unknown, perfdata='') sensor_unit = "" # if it's a onOff Sensor or something like that, we need an empty string for the summary sensor_unit_string = "" sensor_value = "" sensor_digit = "" real_sensor_value = "" sensor_warning_upper = "" sensor_critical_upper = "" sensor_warning_lower = "" sensor_critical_lower = "" if int(sensor_type) not in [14, 16, 17, 18, 19, 20]: # for all sensors except these, we want to calculate the real value and show the metric. # 14: onOff # 16: vibration # 17: waterDetection # 18: smokeDetection # 19: binary # 20: contact try: sensor_unit, sensor_digit, sensor_warning_upper, sensor_critical_upper, sensor_warning_lower, sensor_critical_lower, sensor_value = self.sess.get_oids( self.oids['oid_sensor_unit'], self.oids['oid_sensor_digit'], self.oids['oid_sensor_warning_upper'], self.oids['oid_sensor_critical_upper'], self.oids['oid_sensor_warning_lower'], self.oids['oid_sensor_critical_lower'], self.oids['oid_sensor_value']) except health_monitoring_plugins.SnmpException as e: helper.exit(summary=str(e), exit_code=unknown, perfdata='') sensor_unit_string = units[int(sensor_unit)] real_sensor_value = real_value(int(sensor_value), sensor_digit) real_sensor_warning_upper = real_value(sensor_warning_upper, sensor_digit) real_sensor_critical_upper = real_value(sensor_critical_upper, sensor_digit) real_sensor_warning_lower = real_value(sensor_warning_lower, sensor_digit) real_sensor_critical_lower = real_value(sensor_critical_lower, sensor_digit) # metrics are only possible for these sensors helper.add_metric(sensor_name + " -%s- " % sensor_unit_string, real_sensor_value, real_sensor_warning_lower +\ ":" + real_sensor_warning_upper, real_sensor_critical_lower +\ ":" + real_sensor_critical_upper, "", "", "") # "OK" state if sensor_state_string in ["closed", "normal", "on", "notDetected", "ok", "yes", "one", "two", "inSync"]: helper.status(ok) # "WARNING" state elif sensor_state_string in ["open", "belowLowerWarning", "aboveUpperWarning", "marginal", "standby"]: helper.status(warning) # "CRITICAL" state elif sensor_state_string in ["belowLowerCritical", "aboveUpperCritical", "off", "detected", "alarmed", "fail", "no", "outOfSync"]: helper.status(critical) # "UNKOWN" state elif sensor_state_string in ["unavailable"]: helper.status(unknown) # received an undefined state else: helper.exit(summary="Something went wrong - received undefined state", exit_code=unknown, perfdata='') # summary is shown for all sensors helper.add_summary("Sensor %s - '%s' %s%s is: %s" % (self.number, sensor_name, real_sensor_value, sensor_unit_string, sensor_state_string))
['def', 'check_sensor', '(', 'self', ',', 'helper', ')', ':', 'try', ':', 'sensor_name', ',', 'sensor_state', ',', 'sensor_type', '=', 'self', '.', 'sess', '.', 'get_oids', '(', 'self', '.', 'oids', '[', "'oid_sensor_name'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_state'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_type'", ']', ')', 'except', 'health_monitoring_plugins', '.', 'SnmpException', 'as', 'e', ':', 'helper', '.', 'exit', '(', 'summary', '=', 'str', '(', 'e', ')', ',', 'exit_code', '=', 'unknown', ',', 'perfdata', '=', "''", ')', 'try', ':', 'sensor_state_string', '=', 'states', '[', 'int', '(', 'sensor_state', ')', ']', 'except', 'KeyError', 'as', 'e', ':', 'helper', '.', 'exit', '(', 'summary', '=', '"Invalid sensor response "', '+', 'sensor_state', ',', 'exit_code', '=', 'unknown', ',', 'perfdata', '=', "''", ')', 'sensor_unit', '=', '""', "# if it's a onOff Sensor or something like that, we need an empty string for the summary", 'sensor_unit_string', '=', '""', 'sensor_value', '=', '""', 'sensor_digit', '=', '""', 'real_sensor_value', '=', '""', 'sensor_warning_upper', '=', '""', 'sensor_critical_upper', '=', '""', 'sensor_warning_lower', '=', '""', 'sensor_critical_lower', '=', '""', 'if', 'int', '(', 'sensor_type', ')', 'not', 'in', '[', '14', ',', '16', ',', '17', ',', '18', ',', '19', ',', '20', ']', ':', '# for all sensors except these, we want to calculate the real value and show the metric.', '# 14: onOff', '# 16: vibration', '# 17: waterDetection', '# 18: smokeDetection', '# 19: binary', '# 20: contact', 'try', ':', 'sensor_unit', ',', 'sensor_digit', ',', 'sensor_warning_upper', ',', 'sensor_critical_upper', ',', 'sensor_warning_lower', ',', 'sensor_critical_lower', ',', 'sensor_value', '=', 'self', '.', 'sess', '.', 'get_oids', '(', 'self', '.', 'oids', '[', "'oid_sensor_unit'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_digit'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_warning_upper'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_critical_upper'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_warning_lower'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_critical_lower'", ']', ',', 'self', '.', 'oids', '[', "'oid_sensor_value'", ']', ')', 'except', 'health_monitoring_plugins', '.', 'SnmpException', 'as', 'e', ':', 'helper', '.', 'exit', '(', 'summary', '=', 'str', '(', 'e', ')', ',', 'exit_code', '=', 'unknown', ',', 'perfdata', '=', "''", ')', 'sensor_unit_string', '=', 'units', '[', 'int', '(', 'sensor_unit', ')', ']', 'real_sensor_value', '=', 'real_value', '(', 'int', '(', 'sensor_value', ')', ',', 'sensor_digit', ')', 'real_sensor_warning_upper', '=', 'real_value', '(', 'sensor_warning_upper', ',', 'sensor_digit', ')', 'real_sensor_critical_upper', '=', 'real_value', '(', 'sensor_critical_upper', ',', 'sensor_digit', ')', 'real_sensor_warning_lower', '=', 'real_value', '(', 'sensor_warning_lower', ',', 'sensor_digit', ')', 'real_sensor_critical_lower', '=', 'real_value', '(', 'sensor_critical_lower', ',', 'sensor_digit', ')', '# metrics are only possible for these sensors', 'helper', '.', 'add_metric', '(', 'sensor_name', '+', '" -%s- "', '%', 'sensor_unit_string', ',', 'real_sensor_value', ',', 'real_sensor_warning_lower', '+', '":"', '+', 'real_sensor_warning_upper', ',', 'real_sensor_critical_lower', '+', '":"', '+', 'real_sensor_critical_upper', ',', '""', ',', '""', ',', '""', ')', '# "OK" state', 'if', 'sensor_state_string', 'in', '[', '"closed"', ',', '"normal"', ',', '"on"', ',', '"notDetected"', ',', '"ok"', ',', '"yes"', ',', '"one"', ',', '"two"', ',', '"inSync"', ']', ':', 'helper', '.', 'status', '(', 'ok', ')', '# "WARNING" state', 'elif', 'sensor_state_string', 'in', '[', '"open"', ',', '"belowLowerWarning"', ',', '"aboveUpperWarning"', ',', '"marginal"', ',', '"standby"', ']', ':', 'helper', '.', 'status', '(', 'warning', ')', '# "CRITICAL" state', 'elif', 'sensor_state_string', 'in', '[', '"belowLowerCritical"', ',', '"aboveUpperCritical"', ',', '"off"', ',', '"detected"', ',', '"alarmed"', ',', '"fail"', ',', '"no"', ',', '"outOfSync"', ']', ':', 'helper', '.', 'status', '(', 'critical', ')', '# "UNKOWN" state ', 'elif', 'sensor_state_string', 'in', '[', '"unavailable"', ']', ':', 'helper', '.', 'status', '(', 'unknown', ')', '# received an undefined state ', 'else', ':', 'helper', '.', 'exit', '(', 'summary', '=', '"Something went wrong - received undefined state"', ',', 'exit_code', '=', 'unknown', ',', 'perfdata', '=', "''", ')', '# summary is shown for all sensors', 'helper', '.', 'add_summary', '(', '"Sensor %s - \'%s\' %s%s is: %s"', '%', '(', 'self', '.', 'number', ',', 'sensor_name', ',', 'real_sensor_value', ',', 'sensor_unit_string', ',', 'sensor_state_string', ')', ')']
check the status of the specified sensor
['check', 'the', 'status', 'of', 'the', 'specified', 'sensor']
train
https://github.com/rsmuc/health_monitoring_plugins/blob/7ac29dfb9fe46c055b018cb72ad0d7d8065589b9/health_monitoring_plugins/raritan.py#L174-L248
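The check hinges on real_value(), which is defined elsewhere in the module; the reimplementation below is an assumption about its behavior (shift the decimal point by the reported digit count), shown only to make the scaling concrete:

def real_value(value, digit):
    # Assumed behavior: Raritan reports integers plus a decimal-digit count.
    return str(float(value) / 10 ** int(digit))

print(real_value(2305, 2))  # '23.05' -- e.g. a temperature in degrees C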
1,039
RobotStudio/bors
bors/api/websock.py
SockChannel.connect
def connect(self, sock): """Attach a given socket to a channel""" def cbwrap(*args, **kwargs): """Callback wrapper; passes in response_type""" self.callback(self.response_type, *args, **kwargs) self.sock = sock self.sock.subscribe(self.channel) self.sock.onchannel(self.channel, cbwrap)
python
def connect(self, sock): """Attach a given socket to a channel""" def cbwrap(*args, **kwargs): """Callback wrapper; passes in response_type""" self.callback(self.response_type, *args, **kwargs) self.sock = sock self.sock.subscribe(self.channel) self.sock.onchannel(self.channel, cbwrap)
['def', 'connect', '(', 'self', ',', 'sock', ')', ':', 'def', 'cbwrap', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', '"""Callback wrapper; passes in response_type"""', 'self', '.', 'callback', '(', 'self', '.', 'response_type', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'self', '.', 'sock', '=', 'sock', 'self', '.', 'sock', '.', 'subscribe', '(', 'self', '.', 'channel', ')', 'self', '.', 'sock', '.', 'onchannel', '(', 'self', '.', 'channel', ',', 'cbwrap', ')']
Attach a given socket to a channel
['Attach', 'a', 'given', 'socket', 'to', 'a', 'channel']
train
https://github.com/RobotStudio/bors/blob/38bf338fc6905d90819faa56bd832140116720f0/bors/api/websock.py#L96-L104
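A runnable sketch with a stub in place of the real websocket object, to show how cbwrap threads response_type into the user callback:

class StubSock:
    def __init__(self):
        self.handlers = {}
    def subscribe(self, channel):
        pass  # the real socket would send a subscription frame here
    def onchannel(self, channel, cb):
        self.handlers[channel] = cb

class SockChannel:
    def __init__(self, channel, response_type, callback):
        self.channel = channel
        self.response_type = response_type
        self.callback = callback

    def connect(self, sock):
        """Attach a given socket to a channel"""
        def cbwrap(*args, **kwargs):
            self.callback(self.response_type, *args, **kwargs)
        self.sock = sock
        self.sock.subscribe(self.channel)
        self.sock.onchannel(self.channel, cbwrap)

chan = SockChannel("ticker", "json", lambda rtype, msg: print(rtype, msg))
chan.connect(StubSock())
chan.sock.handlers["ticker"]("hello")  # prints: json hello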
1,040
tehmaze/natural
natural/phone.py
enum
def enum(number, zone='e164.arpa'): ''' Printable DNS ENUM (telephone number mapping) record. :param number: string :param zone: string >>> print(enum('+31 20 5423 1567')) 7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa. >>> print(enum('+31 97 99 6642', zone='e164.spacephone.org')) 2.4.6.6.9.9.7.9.1.3.e164.spacephone.org. ''' number = e164(number).lstrip('+') return u'.'.join([ u'.'.join(number[::-1]), zone.strip(u'.'), '', ])
python
def enum(number, zone='e164.arpa'): ''' Printable DNS ENUM (telephone number mapping) record. :param number: string :param zone: string >>> print(enum('+31 20 5423 1567')) 7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa. >>> print(enum('+31 97 99 6642', zone='e164.spacephone.org')) 2.4.6.6.9.9.7.9.1.3.e164.spacephone.org. ''' number = e164(number).lstrip('+') return u'.'.join([ u'.'.join(number[::-1]), zone.strip(u'.'), '', ])
['def', 'enum', '(', 'number', ',', 'zone', '=', "'e164.arpa'", ')', ':', 'number', '=', 'e164', '(', 'number', ')', '.', 'lstrip', '(', "'+'", ')', 'return', "u'.'", '.', 'join', '(', '[', "u'.'", '.', 'join', '(', 'number', '[', ':', ':', '-', '1', ']', ')', ',', 'zone', '.', 'strip', '(', "u'.'", ')', ',', "''", ',', ']', ')']
Printable DNS ENUM (telephone number mapping) record. :param number: string :param zone: string >>> print(enum('+31 20 5423 1567')) 7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa. >>> print(enum('+31 97 99 6642', zone='e164.spacephone.org')) 2.4.6.6.9.9.7.9.1.3.e164.spacephone.org.
['Printable', 'DNS', 'ENUM', '(', 'telephone', 'number', 'mapping', ')', 'record', '.']
train
https://github.com/tehmaze/natural/blob/d7a1fc9de712f9bcf68884a80826a7977df356fb/natural/phone.py#L123-L142
1,041
greenbone/ospd
ospd/ospd.py
OSPDaemon.get_scanner_param_type
def get_scanner_param_type(self, param): """ Returns type of a scanner parameter. """ assert isinstance(param, str) entry = self.scanner_params.get(param) if not entry: return None return entry.get('type')
python
def get_scanner_param_type(self, param): """ Returns type of a scanner parameter. """ assert isinstance(param, str) entry = self.scanner_params.get(param) if not entry: return None return entry.get('type')
['def', 'get_scanner_param_type', '(', 'self', ',', 'param', ')', ':', 'assert', 'isinstance', '(', 'param', ',', 'str', ')', 'entry', '=', 'self', '.', 'scanner_params', '.', 'get', '(', 'param', ')', 'if', 'not', 'entry', ':', 'return', 'None', 'return', 'entry', '.', 'get', '(', "'type'", ')']
Returns type of a scanner parameter.
['Returns', 'type', 'of', 'a', 'scanner', 'parameter', '.']
train
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/ospd.py#L675-L681
1,042
amperser/proselint
proselint/checks/consistency/spacing.py
check
def check(text): """Check the text.""" err = "consistency.spacing" msg = "Inconsistent spacing after period (1 vs. 2 spaces)." regex = ["[\.\?!] [A-Z]", "[\.\?!]  [A-Z]"] return consistency_check(text, [regex], err, msg)
python
def check(text): """Check the text.""" err = "consistency.spacing" msg = "Inconsistent spacing after period (1 vs. 2 spaces)." regex = ["[\.\?!] [A-Z]", "[\.\?!]  [A-Z]"] return consistency_check(text, [regex], err, msg)
['def', 'check', '(', 'text', ')', ':', 'err', '=', '"consistency.spacing"', 'msg', '=', '"Inconsistent spacing after period (1 vs. 2 spaces)."', 'regex', '=', '[', '"[\\.\\?!] [A-Z]"', ',', '"[\\.\\?!]  [A-Z]"', ']', 'return', 'consistency_check', '(', 'text', ',', '[', 'regex', ']', ',', 'err', ',', 'msg', ')']
Check the text.
['Check', 'the', 'text', '.']
train
https://github.com/amperser/proselint/blob/cb619ee4023cc7856f5fb96aec2a33a2c9f1a2e2/proselint/checks/consistency/spacing.py#L21-L27
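A hypothetical invocation (assumes proselint is installed); consistency_check comes from proselint's shared tools, and its exact return shape is not shown in the record:

from proselint.checks.consistency.spacing import check

text = "One space after. Fine.  Two spaces after.  Also fine."
errors = check(text)  # flags whichever spacing style is in the minority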
1,043
ponty/psidialogs
psidialogs/api/easydialogs_api.py
AskFileForSave
def AskFileForSave(message=None, savedFileName=None, version=None, defaultLocation=None, dialogOptionFlags=None, location=None, clientName=None, windowTitle=None, actionButtonLabel=None, cancelButtonLabel=None, preferenceKey=None, popupExtension=None, eventProc=None, fileType=None, fileCreator=None, wanted=None, multiple=None): """Original doc: Display a dialog asking the user for a filename to save to. wanted is the return type wanted: FSSpec, FSRef, unicode or string (default) the other arguments can be looked up in Apple's Navigation Services documentation""" return psidialogs.ask_file(message=message, save=True)
python
def AskFileForSave(message=None, savedFileName=None, version=None, defaultLocation=None, dialogOptionFlags=None, location=None, clientName=None, windowTitle=None, actionButtonLabel=None, cancelButtonLabel=None, preferenceKey=None, popupExtension=None, eventProc=None, fileType=None, fileCreator=None, wanted=None, multiple=None): """Original doc: Display a dialog asking the user for a filename to save to. wanted is the return type wanted: FSSpec, FSRef, unicode or string (default) the other arguments can be looked up in Apple's Navigation Services documentation""" return psidialogs.ask_file(message=message, save=True)
['def', 'AskFileForSave', '(', 'message', '=', 'None', ',', 'savedFileName', '=', 'None', ',', 'version', '=', 'None', ',', 'defaultLocation', '=', 'None', ',', 'dialogOptionFlags', '=', 'None', ',', 'location', '=', 'None', ',', 'clientName', '=', 'None', ',', 'windowTitle', '=', 'None', ',', 'actionButtonLabel', '=', 'None', ',', 'cancelButtonLabel', '=', 'None', ',', 'preferenceKey', '=', 'None', ',', 'popupExtension', '=', 'None', ',', 'eventProc', '=', 'None', ',', 'fileType', '=', 'None', ',', 'fileCreator', '=', 'None', ',', 'wanted', '=', 'None', ',', 'multiple', '=', 'None', ')', ':', 'return', 'psidialogs', '.', 'ask_file', '(', 'message', '=', 'message', ',', 'save', '=', 'True', ')']
Original doc: Display a dialog asking the user for a filename to save to. wanted is the return type wanted: FSSpec, FSRef, unicode or string (default) the other arguments can be looked up in Apple's Navigation Services documentation
['Original', 'doc', ':', 'Display', 'a', 'dialog', 'asking', 'the', 'user', 'for', 'a', 'filename', 'to', 'save', 'to', '.']
train
https://github.com/ponty/psidialogs/blob/e385ab6b48cb43af52b810a1bf76a8135f4585b8/psidialogs/api/easydialogs_api.py#L4-L9
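A hypothetical call through the shim's target API, matching the ask_file invocation shown in the record:

import psidialogs

# Opens a native save dialog; returns the chosen path, or None on cancel.
path = psidialogs.ask_file(message="Save report as...", save=True)
print(path)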
1,044
Checksum/landfill
landfill.py
fake_print
def fake_print(self): ''' This is the overridden __str__ method for Operation Recursively prints out the actual query to be executed ''' def _fake_run(): kwargs = self.kwargs.copy() kwargs['generate'] = True return _fake_handle_result( getattr(self.migrator, self.method)(*self.args, **kwargs) ) def _fake_handle_result(result): if isinstance(result, Node): sql, params = self._parse_node(result) return (sql, params) elif isinstance(result, Operation): return str(result) elif isinstance(result, (list, tuple)): return '\n'.join([str(_fake_handle_result(item)) for item in result]) return str(_fake_run())
python
def fake_print(self): ''' This is the overridden __str__ method for Operation Recursively prints out the actual query to be executed ''' def _fake_run(): kwargs = self.kwargs.copy() kwargs['generate'] = True return _fake_handle_result( getattr(self.migrator, self.method)(*self.args, **kwargs) ) def _fake_handle_result(result): if isinstance(result, Node): sql, params = self._parse_node(result) return (sql, params) elif isinstance(result, Operation): return str(result) elif isinstance(result, (list, tuple)): return '\n'.join([str(_fake_handle_result(item)) for item in result]) return str(_fake_run())
['def', 'fake_print', '(', 'self', ')', ':', 'def', '_fake_run', '(', ')', ':', 'kwargs', '=', 'self', '.', 'kwargs', '.', 'copy', '(', ')', 'kwargs', '[', "'generate'", ']', '=', 'True', 'return', '_fake_handle_result', '(', 'getattr', '(', 'self', '.', 'migrator', ',', 'self', '.', 'method', ')', '(', '*', 'self', '.', 'args', ',', '*', '*', 'kwargs', ')', ')', 'def', '_fake_handle_result', '(', 'result', ')', ':', 'if', 'isinstance', '(', 'result', ',', 'Node', ')', ':', 'sql', ',', 'params', '=', 'self', '.', '_parse_node', '(', 'result', ')', 'return', '(', 'sql', ',', 'params', ')', 'elif', 'isinstance', '(', 'result', ',', 'Operation', ')', ':', 'return', 'str', '(', 'result', ')', 'elif', 'isinstance', '(', 'result', ',', '(', 'list', ',', 'tuple', ')', ')', ':', 'return', "'\\n'", '.', 'join', '(', '[', 'str', '(', '_fake_handle_result', '(', 'item', ')', ')', 'for', 'item', 'in', 'result', ']', ')', 'return', 'str', '(', '_fake_run', '(', ')', ')']
This is the overridden __str__ method for Operation Recursively prints out the actual query to be executed
['This', 'is', 'the', 'overridden', '__str__', 'method', 'for', 'Operation', 'Recursively', 'prints', 'out', 'the', 'actual', 'query', 'to', 'be', 'executed']
train
https://github.com/Checksum/landfill/blob/bf1ea36042a00dc848e1380b8dc0153bb7e89fea/landfill.py#L371-L392
1,045
mojaie/chorus
chorus/topology.py
minify_ring
def minify_ring(mol, verbose=False): """ Minify ring set (similar to SSSR) Limitation: this can not correctly recognize minimum rings in the case of non-outerplanar graph. Note: concept of SSSR is controversial. Roughly reduce the size of cycle basis can help some scaffold-based analysis """ mol.require("Topology") for cyc_idx in mol.scaffolds: rings = deque(sorted([mol.rings[c] for c in cyc_idx], key=len)) minified = [] cnt = 0 while rings: cnt += 1 if cnt > 100: mol.descriptors.add("MinifiedRing") raise RuntimeError("Ring minimization failed") r = rings.popleft() init_r = r if verbose: print(len(r), "Ring:{}".format(r)) for m in minified: if verbose: print(len(m), "Minified:{}".format(m)) resolved = resolve_inclusion(r, m) if resolved: if verbose: print(len(resolved[0]), len(resolved[1]), "Resolved:{}".format(resolved)) r = resolved[0] if verbose: print(len(r), "New ring:{}\n".format(r)) if len(r) == len(init_r): # no longer be able to minified minified.append(r) else: # further minification required rings.append(r) for c in cyc_idx: mol.rings[c] = minified.pop() mol.descriptors.add("MinifiedRing")
python
def minify_ring(mol, verbose=False): """ Minify ring set (similar to SSSR) Limitation: this can not correctly recognize minimum rings in the case of non-outerplanar graph. Note: concept of SSSR is controversial. Roughly reduce the size of cycle basis can help some scaffold-based analysis """ mol.require("Topology") for cyc_idx in mol.scaffolds: rings = deque(sorted([mol.rings[c] for c in cyc_idx], key=len)) minified = [] cnt = 0 while rings: cnt += 1 if cnt > 100: mol.descriptors.add("MinifiedRing") raise RuntimeError("Ring minimization failed") r = rings.popleft() init_r = r if verbose: print(len(r), "Ring:{}".format(r)) for m in minified: if verbose: print(len(m), "Minified:{}".format(m)) resolved = resolve_inclusion(r, m) if resolved: if verbose: print(len(resolved[0]), len(resolved[1]), "Resolved:{}".format(resolved)) r = resolved[0] if verbose: print(len(r), "New ring:{}\n".format(r)) if len(r) == len(init_r): # no longer be able to minified minified.append(r) else: # further minification required rings.append(r) for c in cyc_idx: mol.rings[c] = minified.pop() mol.descriptors.add("MinifiedRing")
['def', 'minify_ring', '(', 'mol', ',', 'verbose', '=', 'False', ')', ':', 'mol', '.', 'require', '(', '"Topology"', ')', 'for', 'cyc_idx', 'in', 'mol', '.', 'scaffolds', ':', 'rings', '=', 'deque', '(', 'sorted', '(', '[', 'mol', '.', 'rings', '[', 'c', ']', 'for', 'c', 'in', 'cyc_idx', ']', ',', 'key', '=', 'len', ')', ')', 'minified', '=', '[', ']', 'cnt', '=', '0', 'while', 'rings', ':', 'cnt', '+=', '1', 'if', 'cnt', '>', '100', ':', 'mol', '.', 'descriptors', '.', 'add', '(', '"MinifiedRing"', ')', 'raise', 'RuntimeError', '(', '"Ring minimization failed"', ')', 'r', '=', 'rings', '.', 'popleft', '(', ')', 'init_r', '=', 'r', 'if', 'verbose', ':', 'print', '(', 'len', '(', 'r', ')', ',', '"Ring:{}"', '.', 'format', '(', 'r', ')', ')', 'for', 'm', 'in', 'minified', ':', 'if', 'verbose', ':', 'print', '(', 'len', '(', 'm', ')', ',', '"Minified:{}"', '.', 'format', '(', 'm', ')', ')', 'resolved', '=', 'resolve_inclusion', '(', 'r', ',', 'm', ')', 'if', 'resolved', ':', 'if', 'verbose', ':', 'print', '(', 'len', '(', 'resolved', '[', '0', ']', ')', ',', 'len', '(', 'resolved', '[', '1', ']', ')', ',', '"Resolved:{}"', '.', 'format', '(', 'resolved', ')', ')', 'r', '=', 'resolved', '[', '0', ']', 'if', 'verbose', ':', 'print', '(', 'len', '(', 'r', ')', ',', '"New ring:{}\\n"', '.', 'format', '(', 'r', ')', ')', 'if', 'len', '(', 'r', ')', '==', 'len', '(', 'init_r', ')', ':', '# no longer be able to minified', 'minified', '.', 'append', '(', 'r', ')', 'else', ':', '# further minification required', 'rings', '.', 'append', '(', 'r', ')', 'for', 'c', 'in', 'cyc_idx', ':', 'mol', '.', 'rings', '[', 'c', ']', '=', 'minified', '.', 'pop', '(', ')', 'mol', '.', 'descriptors', '.', 'add', '(', '"MinifiedRing"', ')']
Minify ring set (similar to SSSR) Limitation: this can not correctly recognize minimum rings in the case of non-outerplanar graph. Note: concept of SSSR is controversial. Roughly reduce the size of cycle basis can help some scaffold-based analysis
['Minify', 'ring', 'set', '(', 'similar', 'to', 'SSSR', ')', 'Limitation', ':', 'this', 'can', 'not', 'correctly', 'recognize', 'minimum', 'rings', 'in', 'the', 'case', 'of', 'non', '-', 'outerplanar', 'graph', '.', 'Note', ':', 'concept', 'of', 'SSSR', 'is', 'controversial', '.', 'Roughly', 'reduce', 'the', 'size', 'of', 'cycle', 'basis', 'can', 'help', 'some', 'scaffold', '-', 'based', 'analysis']
train
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/topology.py#L84-L122
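A toy sketch of the fixpoint loop above, using plain atom-index sets and symmetric difference as a simplified stand-in for resolve_inclusion. The real function works on molecular ring paths, so this is illustrative only; every name here is defined within the sketch itself.
from collections import deque

def toy_resolve(r, m):
    # Keep the XOR only when it yields a strictly smaller ring of size > 2.
    cand = r ^ m
    return cand if 2 < len(cand) < len(r) else None

rings = deque(sorted([{1, 2, 3, 4}, {1, 2, 3, 4, 5, 6, 7, 8}], key=len))
minified = []
while rings:
    r = rings.popleft()
    init_len = len(r)
    for m in minified:
        resolved = toy_resolve(r, m)
        if resolved:
            r = resolved
    # Unchanged rings are final; shrunken ones go back for another pass.
    (minified if len(r) == init_len else rings).append(r)

print([sorted(s) for s in minified])  # [[1, 2, 3, 4], [5, 6, 7, 8]]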
1,046
gwpy/gwpy
gwpy/io/nds2.py
minute_trend_times
def minute_trend_times(start, end): """Expand a [start, end) interval for use in querying for minute trends NDS2 requires start and end times for minute trends to be a multiple of 60 (to exactly match the time of a minute-trend sample), so this function expands the given ``[start, end)`` interval to the nearest multiples. Parameters ---------- start : `int` GPS start time of query end : `int` GPS end time of query Returns ------- mstart : `int` ``start`` rounded down to nearest multiple of 60 mend : `int` ``end`` rounded up to nearest multiple of 60 """ if start % 60: start = int(start) // 60 * 60 if end % 60: end = int(end) // 60 * 60 + 60 return int(start), int(end)
python
def minute_trend_times(start, end): """Expand a [start, end) interval for use in querying for minute trends NDS2 requires start and end times for minute trends to be a multiple of 60 (to exactly match the time of a minute-trend sample), so this function expands the given ``[start, end)`` interval to the nearest multiples. Parameters ---------- start : `int` GPS start time of query end : `int` GPS end time of query Returns ------- mstart : `int` ``start`` rounded down to nearest multiple of 60 mend : `int` ``end`` rounded up to nearest multiple of 60 """ if start % 60: start = int(start) // 60 * 60 if end % 60: end = int(end) // 60 * 60 + 60 return int(start), int(end)
['def', 'minute_trend_times', '(', 'start', ',', 'end', ')', ':', 'if', 'start', '%', '60', ':', 'start', '=', 'int', '(', 'start', ')', '//', '60', '*', '60', 'if', 'end', '%', '60', ':', 'end', '=', 'int', '(', 'end', ')', '//', '60', '*', '60', '+', '60', 'return', 'int', '(', 'start', ')', ',', 'int', '(', 'end', ')']
Expand a [start, end) interval for use in querying for minute trends NDS2 requires start and end times for minute trends to be a multiple of 60 (to exactly match the time of a minute-trend sample), so this function expands the given ``[start, end)`` interval to the nearest multiples. Parameters ---------- start : `int` GPS start time of query end : `int` GPS end time of query Returns ------- mstart : `int` ``start`` rounded down to nearest multiple of 60 mend : `int` ``end`` rounded up to nearest multiple of 60
['Expand', 'a', '[', 'start', 'end', ')', 'interval', 'for', 'use', 'in', 'querying', 'for', 'minute', 'trends']
train
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/nds2.py#L608-L634
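A quick check of the rounding contract described above, assuming gwpy is installed; the import path follows the file location of this record.
from gwpy.io.nds2 import minute_trend_times

# 1000000000 GPS is 40 s past a minute boundary, so the start rounds down
# and the end rounds up to the next multiple of 60.
assert minute_trend_times(1000000000, 1000000001) == (999999960, 1000000020)
# Already-aligned inputs pass through unchanged.
assert minute_trend_times(999999960, 1000000020) == (999999960, 1000000020)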
1,047
pysathq/pysat
pysat/solvers.py
MinisatGH.solve_limited
def solve_limited(self, assumptions=[]): """ Solve internal formula using given budgets for conflicts and propagations. """ if self.minisat: if self.use_timer: start_time = time.clock() # saving default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) self.status = pysolvers.minisatgh_solve_lim(self.minisat, assumptions) # recovering default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) if self.use_timer: self.call_time = time.clock() - start_time self.accu_time += self.call_time return self.status
python
def solve_limited(self, assumptions=[]): """ Solve internal formula using given budgets for conflicts and propagations. """ if self.minisat: if self.use_timer: start_time = time.clock() # saving default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL) self.status = pysolvers.minisatgh_solve_lim(self.minisat, assumptions) # recovering default SIGINT handler def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler) if self.use_timer: self.call_time = time.clock() - start_time self.accu_time += self.call_time return self.status
['def', 'solve_limited', '(', 'self', ',', 'assumptions', '=', '[', ']', ')', ':', 'if', 'self', '.', 'minisat', ':', 'if', 'self', '.', 'use_timer', ':', 'start_time', '=', 'time', '.', 'clock', '(', ')', '# saving default SIGINT handler', 'def_sigint_handler', '=', 'signal', '.', 'signal', '(', 'signal', '.', 'SIGINT', ',', 'signal', '.', 'SIG_DFL', ')', 'self', '.', 'status', '=', 'pysolvers', '.', 'minisatgh_solve_lim', '(', 'self', '.', 'minisat', ',', 'assumptions', ')', '# recovering default SIGINT handler', 'def_sigint_handler', '=', 'signal', '.', 'signal', '(', 'signal', '.', 'SIGINT', ',', 'def_sigint_handler', ')', 'if', 'self', '.', 'use_timer', ':', 'self', '.', 'call_time', '=', 'time', '.', 'clock', '(', ')', '-', 'start_time', 'self', '.', 'accu_time', '+=', 'self', '.', 'call_time', 'return', 'self', '.', 'status']
Solve internal formula using given budgets for conflicts and propagations.
['Solve', 'internal', 'formula', 'using', 'given', 'budgets', 'for', 'conflicts', 'and', 'propagations', '.']
train
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L3269-L3291
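Note that the timing above uses time.clock(), which was removed in Python 3.8 (time.perf_counter() is the modern replacement). A minimal usage sketch, assuming python-sat's usual conf_budget/prop_budget API on this solver; solve_limited returns None when the budget runs out before a verdict.
from pysat.solvers import MinisatGH

with MinisatGH(bootstrap_with=[[-1, 2], [1, 2], [-2, 1]]) as s:
    s.conf_budget(1000)         # give up after 1000 conflicts
    status = s.solve_limited()  # True / False / None if budget exhausted
    if status:
        print(s.get_model())    # e.g. [1, 2]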
1,048
pybel/pybel
src/pybel/io/indra.py
to_indra_statements
def to_indra_statements(graph): """Export this graph as a list of INDRA statements using the :py:class:`indra.sources.pybel.PybelProcessor`. :param pybel.BELGraph graph: A BEL graph :rtype: list[indra.statements.Statement] """ from indra.sources.bel import process_pybel_graph pbp = process_pybel_graph(graph) return pbp.statements
python
def to_indra_statements(graph): """Export this graph as a list of INDRA statements using the :py:class:`indra.sources.pybel.PybelProcessor`. :param pybel.BELGraph graph: A BEL graph :rtype: list[indra.statements.Statement] """ from indra.sources.bel import process_pybel_graph pbp = process_pybel_graph(graph) return pbp.statements
['def', 'to_indra_statements', '(', 'graph', ')', ':', 'from', 'indra', '.', 'sources', '.', 'bel', 'import', 'process_pybel_graph', 'pbp', '=', 'process_pybel_graph', '(', 'graph', ')', 'return', 'pbp', '.', 'statements']
Export this graph as a list of INDRA statements using the :py:class:`indra.sources.pybel.PybelProcessor`. :param pybel.BELGraph graph: A BEL graph :rtype: list[indra.statements.Statement]
['Export', 'this', 'graph', 'as', 'a', 'list', 'of', 'INDRA', 'statements', 'using', 'the', ':', 'py', ':', 'class', ':', 'indra', '.', 'sources', '.', 'pybel', '.', 'PybelProcessor', '.']
train
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/io/indra.py#L127-L136
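A minimal sketch, assuming both pybel and indra are installed; an empty graph should simply yield an empty statement list.
from pybel import BELGraph
from pybel.io.indra import to_indra_statements

graph = BELGraph(name='demo', version='0.1.0')
statements = to_indra_statements(graph)  # delegates to indra's PyBEL processor
print(len(statements))  # 0 for an empty graph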
1,049
raiden-network/raiden
raiden/network/upnpsock.py
connect
def connect(): """Try to connect to the router. Returns: upnp (miniupnpc.UPnP): the connected upnp-instance location (string): the connection information """ upnp = miniupnpc.UPnP() upnp.discoverdelay = 200 providers = upnp.discover() if providers > 1: log.debug('multiple upnp providers found', num_providers=providers) elif providers < 1: log.error('no upnp providers found') return None try: location = upnp.selectigd() log.debug('connected', upnp=upnp) except Exception as e: log.error('Error when connecting to uPnP provider', exception_info=e) return None if not valid_mappable_ipv4(upnp.lanaddr): log.error('could not query your lanaddr', reported=upnp.lanaddr) return None try: # this can fail if router advertises uPnP incorrectly if not valid_mappable_ipv4(upnp.externalipaddress()): log.error('could not query your externalipaddress', reported=upnp.externalipaddress()) return None return upnp, location except Exception: log.error('error when connecting with uPnP provider', location=location) return None
python
def connect(): """Try to connect to the router. Returns: upnp (miniupnpc.UPnP): the connected upnp-instance location (string): the connection information """ upnp = miniupnpc.UPnP() upnp.discoverdelay = 200 providers = upnp.discover() if providers > 1: log.debug('multiple upnp providers found', num_providers=providers) elif providers < 1: log.error('no upnp providers found') return None try: location = upnp.selectigd() log.debug('connected', upnp=upnp) except Exception as e: log.error('Error when connecting to uPnP provider', exception_info=e) return None if not valid_mappable_ipv4(upnp.lanaddr): log.error('could not query your lanaddr', reported=upnp.lanaddr) return None try: # this can fail if router advertises uPnP incorrectly if not valid_mappable_ipv4(upnp.externalipaddress()): log.error('could not query your externalipaddress', reported=upnp.externalipaddress()) return None return upnp, location except Exception: log.error('error when connecting with uPnP provider', location=location) return None
['def', 'connect', '(', ')', ':', 'upnp', '=', 'miniupnpc', '.', 'UPnP', '(', ')', 'upnp', '.', 'discoverdelay', '=', '200', 'providers', '=', 'upnp', '.', 'discover', '(', ')', 'if', 'providers', '>', '1', ':', 'log', '.', 'debug', '(', "'multiple upnp providers found'", ',', 'num_providers', '=', 'providers', ')', 'elif', 'providers', '<', '1', ':', 'log', '.', 'error', '(', "'no upnp providers found'", ')', 'return', 'None', 'try', ':', 'location', '=', 'upnp', '.', 'selectigd', '(', ')', 'log', '.', 'debug', '(', "'connected'", ',', 'upnp', '=', 'upnp', ')', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'error', '(', "'Error when connecting to uPnP provider'", ',', 'exception_info', '=', 'e', ')', 'return', 'None', 'if', 'not', 'valid_mappable_ipv4', '(', 'upnp', '.', 'lanaddr', ')', ':', 'log', '.', 'error', '(', "'could not query your lanaddr'", ',', 'reported', '=', 'upnp', '.', 'lanaddr', ')', 'return', 'None', 'try', ':', '# this can fail if router advertises uPnP incorrectly', 'if', 'not', 'valid_mappable_ipv4', '(', 'upnp', '.', 'externalipaddress', '(', ')', ')', ':', 'log', '.', 'error', '(', "'could not query your externalipaddress'", ',', 'reported', '=', 'upnp', '.', 'externalipaddress', '(', ')', ')', 'return', 'None', 'return', 'upnp', ',', 'location', 'except', 'Exception', ':', 'log', '.', 'error', '(', "'error when connecting with uPnP provider'", ',', 'location', '=', 'location', ')', 'return', 'None']
Try to connect to the router. Returns: upnp (miniupnpc.UPnP): the connected upnp-instance location (string): the connection information
['Try', 'to', 'connect', 'to', 'the', 'router', '.']
train
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/upnpsock.py#L37-L70
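A sketch of consuming the None-or-tuple contract above; it requires miniupnpc and a uPnP-enabled router on the local network, and externalipaddress() is a standard miniupnpc.UPnP method.
from raiden.network.upnpsock import connect

result = connect()
if result is None:
    print('no usable uPnP gateway found')
else:
    upnp, location = result
    print('IGD at', location, 'external IP', upnp.externalipaddress())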
1,050
saltstack/salt
salt/modules/state.py
single
def single(fun, name, test=None, queue=False, **kwargs): ''' Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify list values, or lists of single-entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict comps = fun.split('.') if len(comps) < 2: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return 'Invalid function passed' kwargs.update({'state': comps[0], 'fun': comps[1], '__id__': name, 'name': name}) orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) err = st_.verify_data(kwargs) if err: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return err st_._mod_init(kwargs) snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally')) ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs): st_.call(kwargs)} _set_retcode(ret) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre) __opts__['test'] = orig_test return ret
python
def single(fun, name, test=None, queue=False, **kwargs): ''' Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify list values, or lists of single-entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim ''' conflict = _check_queue(queue, kwargs) if conflict is not None: return conflict comps = fun.split('.') if len(comps) < 2: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return 'Invalid function passed' kwargs.update({'state': comps[0], 'fun': comps[1], '__id__': name, 'name': name}) orig_test = __opts__.get('test', None) opts = salt.utils.state.get_sls_opts(__opts__, **kwargs) opts['test'] = _get_test_value(test, **kwargs) pillar_override = kwargs.get('pillar') pillar_enc = kwargs.get('pillar_enc') if pillar_enc is None \ and pillar_override is not None \ and not isinstance(pillar_override, dict): raise SaltInvocationError( 'Pillar data must be formatted as a dictionary, unless pillar_enc ' 'is specified.' ) try: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, proxy=__proxy__, initial_pillar=_get_initial_pillar(opts)) except NameError: st_ = salt.state.State(opts, pillar_override, pillar_enc=pillar_enc, initial_pillar=_get_initial_pillar(opts)) err = st_.verify_data(kwargs) if err: __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR return err st_._mod_init(kwargs) snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally')) ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs): st_.call(kwargs)} _set_retcode(ret) # Work around Windows multiprocessing bug, set __opts__['test'] back to # value from before this function was run. _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre) __opts__['test'] = orig_test return ret
['def', 'single', '(', 'fun', ',', 'name', ',', 'test', '=', 'None', ',', 'queue', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'conflict', '=', '_check_queue', '(', 'queue', ',', 'kwargs', ')', 'if', 'conflict', 'is', 'not', 'None', ':', 'return', 'conflict', 'comps', '=', 'fun', '.', 'split', '(', "'.'", ')', 'if', 'len', '(', 'comps', ')', '<', '2', ':', '__context__', '[', "'retcode'", ']', '=', 'salt', '.', 'defaults', '.', 'exitcodes', '.', 'EX_STATE_COMPILER_ERROR', 'return', "'Invalid function passed'", 'kwargs', '.', 'update', '(', '{', "'state'", ':', 'comps', '[', '0', ']', ',', "'fun'", ':', 'comps', '[', '1', ']', ',', "'__id__'", ':', 'name', ',', "'name'", ':', 'name', '}', ')', 'orig_test', '=', '__opts__', '.', 'get', '(', "'test'", ',', 'None', ')', 'opts', '=', 'salt', '.', 'utils', '.', 'state', '.', 'get_sls_opts', '(', '__opts__', ',', '*', '*', 'kwargs', ')', 'opts', '[', "'test'", ']', '=', '_get_test_value', '(', 'test', ',', '*', '*', 'kwargs', ')', 'pillar_override', '=', 'kwargs', '.', 'get', '(', "'pillar'", ')', 'pillar_enc', '=', 'kwargs', '.', 'get', '(', "'pillar_enc'", ')', 'if', 'pillar_enc', 'is', 'None', 'and', 'pillar_override', 'is', 'not', 'None', 'and', 'not', 'isinstance', '(', 'pillar_override', ',', 'dict', ')', ':', 'raise', 'SaltInvocationError', '(', "'Pillar data must be formatted as a dictionary, unless pillar_enc '", "'is specified.'", ')', 'try', ':', 'st_', '=', 'salt', '.', 'state', '.', 'State', '(', 'opts', ',', 'pillar_override', ',', 'pillar_enc', '=', 'pillar_enc', ',', 'proxy', '=', '__proxy__', ',', 'initial_pillar', '=', '_get_initial_pillar', '(', 'opts', ')', ')', 'except', 'NameError', ':', 'st_', '=', 'salt', '.', 'state', '.', 'State', '(', 'opts', ',', 'pillar_override', ',', 'pillar_enc', '=', 'pillar_enc', ',', 'initial_pillar', '=', '_get_initial_pillar', '(', 'opts', ')', ')', 'err', '=', 'st_', '.', 'verify_data', '(', 'kwargs', ')', 'if', 'err', ':', '__context__', '[', "'retcode'", ']', '=', 'salt', '.', 'defaults', '.', 'exitcodes', '.', 'EX_STATE_COMPILER_ERROR', 'return', 'err', 'st_', '.', '_mod_init', '(', 'kwargs', ')', 'snapper_pre', '=', '_snapper_pre', '(', 'opts', ',', 'kwargs', '.', 'get', '(', "'__pub_jid'", ',', "'called locally'", ')', ')', 'ret', '=', '{', "'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'", '.', 'format', '(', 'kwargs', ')', ':', 'st_', '.', 'call', '(', 'kwargs', ')', '}', '_set_retcode', '(', 'ret', ')', "# Work around Windows multiprocessing bug, set __opts__['test'] back to", '# value from before this function was run.', '_snapper_post', '(', 'opts', ',', 'kwargs', '.', 'get', '(', "'__pub_jid'", ',', "'called locally'", ')', ',', 'snapper_pre', ')', '__opts__', '[', "'test'", ']', '=', 'orig_test', 'return', 'ret']
Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify list values, or lists of single-entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim
['Execute', 'a', 'single', 'state', 'function', 'with', 'the', 'named', 'kwargs', 'returns', 'False', 'if', 'insufficient', 'data', 'is', 'sent', 'to', 'the', 'command']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/state.py#L2032-L2099
1,051
Azure/azure-uamqp-python
uamqp/async_ops/session_async.py
SessionAsync.destroy_async
async def destroy_async(self): """Asynchronously close any open management Links and close the Session. Cleans up any C objects for both mgmt Links and Session. """ for _, link in self._mgmt_links.items(): await link.destroy_async() self._session.destroy()
python
async def destroy_async(self): """Asynchronously close any open management Links and close the Session. Cleans up any C objects for both mgmt Links and Session. """ for _, link in self._mgmt_links.items(): await link.destroy_async() self._session.destroy()
['async', 'def', 'destroy_async', '(', 'self', ')', ':', 'for', '_', ',', 'link', 'in', 'self', '.', '_mgmt_links', '.', 'items', '(', ')', ':', 'await', 'link', '.', 'destroy_async', '(', ')', 'self', '.', '_session', '.', 'destroy', '(', ')']
Asynchronously close any open management Links and close the Session. Cleans up any C objects for both mgmt Links and Session.
['Asynchronously', 'close', 'any', 'open', 'management', 'Links', 'and', 'close', 'the', 'Session', '.', 'Cleans', 'up', 'any', 'C', 'objects', 'for', 'both', 'mgmt', 'Links', 'and', 'Session', '.']
train
https://github.com/Azure/azure-uamqp-python/blob/b67e4fcaf2e8a337636947523570239c10a58ae2/uamqp/async_ops/session_async.py#L117-L123
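A minimal teardown sketch; `session` stands for a previously created uamqp SessionAsync instance, so nothing here is invented beyond the wrapper coroutine.
import asyncio

async def close_session(session):
    # Closes management links first, then the underlying C session.
    await session.destroy_async()

# asyncio.get_event_loop().run_until_complete(close_session(session))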
1,052
Esri/ArcREST
src/arcrest/opendata/_web.py
WebOperations._processHandler
def _processHandler(self, securityHandler, param_dict): """processes the handler and returns the cookiejar""" cj = None handler = None if securityHandler is None: cj = cookiejar.CookieJar() elif securityHandler.method.lower() == "token": param_dict['token'] = securityHandler.token if hasattr(securityHandler, 'cookiejar'): cj = securityHandler.cookiejar if hasattr(securityHandler, 'handler'): handler = securityHandler.handler elif securityHandler.method.lower() == "handler": handler = securityHandler.handler cj = securityHandler.cookiejar return param_dict, handler, cj
python
def _processHandler(self, securityHandler, param_dict): """processes the handler and returns the cookiejar""" cj = None handler = None if securityHandler is None: cj = cookiejar.CookieJar() elif securityHandler.method.lower() == "token": param_dict['token'] = securityHandler.token if hasattr(securityHandler, 'cookiejar'): cj = securityHandler.cookiejar if hasattr(securityHandler, 'handler'): handler = securityHandler.handler elif securityHandler.method.lower() == "handler": handler = securityHandler.handler cj = securityHandler.cookiejar return param_dict, handler, cj
['def', '_processHandler', '(', 'self', ',', 'securityHandler', ',', 'param_dict', ')', ':', 'cj', '=', 'None', 'handler', '=', 'None', 'if', 'securityHandler', 'is', 'None', ':', 'cj', '=', 'cookiejar', '.', 'CookieJar', '(', ')', 'elif', 'securityHandler', '.', 'method', '.', 'lower', '(', ')', '==', '"token"', ':', 'param_dict', '[', "'token'", ']', '=', 'securityHandler', '.', 'token', 'if', 'hasattr', '(', 'securityHandler', ',', "'cookiejar'", ')', ':', 'cj', '=', 'securityHandler', '.', 'cookiejar', 'if', 'hasattr', '(', 'securityHandler', ',', "'handler'", ')', ':', 'handler', '=', 'securityHandler', '.', 'handler', 'elif', 'securityHandler', '.', 'method', '.', 'lower', '(', ')', '==', '"handler"', ':', 'handler', '=', 'securityHandler', '.', 'handler', 'cj', '=', 'securityHandler', '.', 'cookiejar', 'return', 'param_dict', ',', 'handler', ',', 'cj']
processes the handler and returns the cookiejar
['processes', 'the', 'handler', 'and', 'returns', 'the', 'cookiejar']
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/opendata/_web.py#L221-L236
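The method only duck-types its securityHandler, so any object with the right attributes satisfies it. A hedged sketch; `ops` stands for an assumed WebOperations instance.
class FakeTokenHandler(object):
    method = 'TOKEN'  # matched case-insensitively via .lower()
    token = 'abc123'
    # no cookiejar/handler attributes, so both fall through to None

param_dict, handler, cj = ops._processHandler(FakeTokenHandler(), {})
assert param_dict['token'] == 'abc123'
assert handler is None and cj is None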
1,053
diffeo/yakonfig
yakonfig/toplevel.py
parse_args
def parse_args(parser, modules, args=None): """Set up global configuration for command-line tools. `modules` is an iterable of :class:`yakonfig.Configurable` objects, or anything equivalently typed. This function iterates through those objects and calls :meth:`~yakonfig.Configurable.add_arguments` on each to build up a complete list of command-line arguments, then calls :meth:`argparse.ArgumentParser.parse_args` to actually process the command line. This produces a configuration that is a combination of all default values declared by all modules; configuration specified in ``--config`` arguments; and overriding configuration values specified in command-line arguments. This returns the :class:`argparse.Namespace` object, in case the application has defined its own command-line parameters and needs to process them. The new global configuration can be obtained via :func:`yakonfig.get_global_config`. :param argparse.ArgumentParser parser: application-provided argument parser :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param args: command-line options, or `None` to use `sys.argv` :return: the new global configuration """ collect_add_argparse(parser, modules) namespace = parser.parse_args(args) try: do_dump_config = getattr(namespace, 'dump_config', None) set_default_config(modules, params=vars(namespace), validate=not do_dump_config) if do_dump_config: if namespace.dump_config == 'full': to_dump = get_global_config() elif namespace.dump_config == 'default': to_dump = assemble_default_config(modules) else: # 'effective' to_dump = diff_config(assemble_default_config(modules), get_global_config()) yaml_mod.dump(to_dump, sys.stdout) parser.exit() except ConfigurationError as e: parser.error(e) return namespace
python
def parse_args(parser, modules, args=None): """Set up global configuration for command-line tools. `modules` is an iterable of :class:`yakonfig.Configurable` objects, or anything equivalently typed. This function iterates through those objects and calls :meth:`~yakonfig.Configurable.add_arguments` on each to build up a complete list of command-line arguments, then calls :meth:`argparse.ArgumentParser.parse_args` to actually process the command line. This produces a configuration that is a combination of all default values declared by all modules; configuration specified in ``--config`` arguments; and overriding configuration values specified in command-line arguments. This returns the :class:`argparse.Namespace` object, in case the application has defined its own command-line parameters and needs to process them. The new global configuration can be obtained via :func:`yakonfig.get_global_config`. :param argparse.ArgumentParser parser: application-provided argument parser :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param args: command-line options, or `None` to use `sys.argv` :return: the new global configuration """ collect_add_argparse(parser, modules) namespace = parser.parse_args(args) try: do_dump_config = getattr(namespace, 'dump_config', None) set_default_config(modules, params=vars(namespace), validate=not do_dump_config) if do_dump_config: if namespace.dump_config == 'full': to_dump = get_global_config() elif namespace.dump_config == 'default': to_dump = assemble_default_config(modules) else: # 'effective' to_dump = diff_config(assemble_default_config(modules), get_global_config()) yaml_mod.dump(to_dump, sys.stdout) parser.exit() except ConfigurationError as e: parser.error(e) return namespace
['def', 'parse_args', '(', 'parser', ',', 'modules', ',', 'args', '=', 'None', ')', ':', 'collect_add_argparse', '(', 'parser', ',', 'modules', ')', 'namespace', '=', 'parser', '.', 'parse_args', '(', 'args', ')', 'try', ':', 'do_dump_config', '=', 'getattr', '(', 'namespace', ',', "'dump_config'", ',', 'None', ')', 'set_default_config', '(', 'modules', ',', 'params', '=', 'vars', '(', 'namespace', ')', ',', 'validate', '=', 'not', 'do_dump_config', ')', 'if', 'do_dump_config', ':', 'if', 'namespace', '.', 'dump_config', '==', "'full'", ':', 'to_dump', '=', 'get_global_config', '(', ')', 'elif', 'namespace', '.', 'dump_config', '==', "'default'", ':', 'to_dump', '=', 'assemble_default_config', '(', 'modules', ')', 'else', ':', "# 'effective'", 'to_dump', '=', 'diff_config', '(', 'assemble_default_config', '(', 'modules', ')', ',', 'get_global_config', '(', ')', ')', 'yaml_mod', '.', 'dump', '(', 'to_dump', ',', 'sys', '.', 'stdout', ')', 'parser', '.', 'exit', '(', ')', 'except', 'ConfigurationError', 'as', 'e', ':', 'parser', '.', 'error', '(', 'e', ')', 'return', 'namespace']
Set up global configuration for command-line tools. `modules` is an iterable of :class:`yakonfig.Configurable` objects, or anything equivalently typed. This function iterates through those objects and calls :meth:`~yakonfig.Configurable.add_arguments` on each to build up a complete list of command-line arguments, then calls :meth:`argparse.ArgumentParser.parse_args` to actually process the command line. This produces a configuration that is a combination of all default values declared by all modules; configuration specified in ``--config`` arguments; and overriding configuration values specified in command-line arguments. This returns the :class:`argparse.Namespace` object, in case the application has defined its own command-line parameters and needs to process them. The new global configuration can be obtained via :func:`yakonfig.get_global_config`. :param argparse.ArgumentParser parser: application-provided argument parser :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param args: command-line options, or `None` to use `sys.argv` :return: the new global configuration
['Set', 'up', 'global', 'configuration', 'for', 'command', '-', 'line', 'tools', '.']
train
https://github.com/diffeo/yakonfig/blob/412e195da29b4f4fc7b72967c192714a6f5eaeb5/yakonfig/toplevel.py#L69-L115
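A minimal wiring sketch. `my_module` is a hypothetical stand-in for something implementing yakonfig's Configurable interface, and the sketch assumes parse_args and get_global_config are re-exported at the package top level, as the docstring's yakonfig.get_global_config reference suggests.
import argparse
import yakonfig

parser = argparse.ArgumentParser(description='my tool')
# my_module is hypothetical: any yakonfig.Configurable-shaped module works.
namespace = yakonfig.parse_args(parser, [my_module])
config = yakonfig.get_global_config()  # the merged global configuration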
1,054
pgjones/quart
quart/blueprints.py
Blueprint.teardown_websocket
def teardown_websocket(self, func: Callable) -> Callable: """Add a teardown websocket function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_websocket`. It applies only to websockets that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_websocket def teardown(): ... """ self.record_once(lambda state: state.app.teardown_websocket(func, self.name)) return func
python
def teardown_websocket(self, func: Callable) -> Callable: """Add a teardown websocket function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_websocket`. It applies only to websockets that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_websocket def teardown(): ... """ self.record_once(lambda state: state.app.teardown_websocket(func, self.name)) return func
['def', 'teardown_websocket', '(', 'self', ',', 'func', ':', 'Callable', ')', '->', 'Callable', ':', 'self', '.', 'record_once', '(', 'lambda', 'state', ':', 'state', '.', 'app', '.', 'teardown_websocket', '(', 'func', ',', 'self', '.', 'name', ')', ')', 'return', 'func']
Add a teardown websocket function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_websocket`. It applies only to websockets that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_websocket def teardown(): ...
['Add', 'a', 'teardown', 'websocket', 'function', 'to', 'the', 'Blueprint', '.']
train
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/blueprints.py#L481-L498
1,055
bitesofcode/projexui
projexui/widgets/xgroupbox.py
XGroupBox.isCollapsed
def isCollapsed( self ): """ Returns whether or not this group box is collapsed. :return <bool> """ if not self.isCollapsible(): return False if self._inverted: return self.isChecked() return not self.isChecked()
python
def isCollapsed( self ): """ Returns whether or not this group box is collapsed. :return <bool> """ if not self.isCollapsible(): return False if self._inverted: return self.isChecked() return not self.isChecked()
['def', 'isCollapsed', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'isCollapsible', '(', ')', ':', 'return', 'False', 'if', 'self', '.', '_inverted', ':', 'return', 'self', '.', 'isChecked', '(', ')', 'return', 'not', 'self', '.', 'isChecked', '(', ')']
Returns whether or not this group box is collapsed. :return <bool>
['Returns', 'whether', 'or', 'not', 'this', 'group', 'box', 'is', 'collapsed', '.', ':', 'return', '<bool', '>']
train
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xgroupbox.py#L54-L65
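The decision logic above, restated as a pure function so the three cases can be checked without a Qt event loop; this is an illustrative rewrite, not projexui API.
def is_collapsed(collapsible, inverted, checked):
    # Mirrors XGroupBox.isCollapsed: never collapsed unless collapsible;
    # inverted boxes collapse when checked, normal ones when unchecked.
    if not collapsible:
        return False
    return checked if inverted else not checked

assert is_collapsed(False, False, False) is False  # not collapsible
assert is_collapsed(True, False, False) is True    # unchecked -> collapsed
assert is_collapsed(True, True, False) is False    # inverted flips it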
1,056
DLR-RM/RAFCON
source/rafcon/gui/helpers/state.py
extract_child_models_of_state
def extract_child_models_of_state(state_m, new_state_class): """Retrieve child models of state model The function extracts the child state and state element models of the given state model into a dict. It only extracts those properties that are required for a state of type `new_state_class`. Transitions are always left out. :param state_m: state model from which children are to be extracted :param new_state_class: The type of the new class :return: """ # check if root state and which type of state assert isinstance(state_m, StateModel) assert issubclass(new_state_class, State) orig_state = state_m.state # only here to get the input parameter of the Core-function current_state_is_container = isinstance(orig_state, ContainerState) new_state_is_container = issubclass(new_state_class, ContainerState) # define which model references to hold for new state required_model_properties = ['input_data_ports', 'output_data_ports', 'outcomes', 'income'] obsolete_model_properties = [] if current_state_is_container and new_state_is_container: # hold some additional references # transitions are removed when changing the state type, thus do not copy them required_model_properties.extend(['states', 'data_flows', 'scoped_variables']) obsolete_model_properties.append('transitions') elif current_state_is_container: obsolete_model_properties.extend(['states', 'transitions', 'data_flows', 'scoped_variables']) def get_element_list(state_m, prop_name): if prop_name == 'income': return [state_m.income] wrapper = getattr(state_m, prop_name) # ._obj is needed as gaphas wraps observable lists and dicts into a gaphas.support.ObsWrapper list_or_dict = wrapper._obj if isinstance(list_or_dict, list): return list_or_dict[:] # copy list return list(list_or_dict.values()) # dict required_child_models = {} for prop_name in required_model_properties: required_child_models[prop_name] = get_element_list(state_m, prop_name) obsolete_child_models = {} for prop_name in obsolete_model_properties: obsolete_child_models[prop_name] = get_element_list(state_m, prop_name) # Special handling of BarrierState, which includes the DeciderState that always becomes obsolete if isinstance(state_m, ContainerStateModel): decider_state_m = state_m.states.get(UNIQUE_DECIDER_STATE_ID, None) if decider_state_m: if new_state_is_container: required_child_models['states'].remove(decider_state_m) obsolete_child_models['states'] = [decider_state_m] return required_child_models, obsolete_child_models
python
def extract_child_models_of_state(state_m, new_state_class): """Retrieve child models of state model The function extracts the child state and state element models of the given state model into a dict. It only extracts those properties that are required for a state of type `new_state_class`. Transitions are always left out. :param state_m: state model from which children are to be extracted :param new_state_class: The type of the new class :return: """ # check if root state and which type of state assert isinstance(state_m, StateModel) assert issubclass(new_state_class, State) orig_state = state_m.state # only here to get the input parameter of the Core-function current_state_is_container = isinstance(orig_state, ContainerState) new_state_is_container = issubclass(new_state_class, ContainerState) # define which model references to hold for new state required_model_properties = ['input_data_ports', 'output_data_ports', 'outcomes', 'income'] obsolete_model_properties = [] if current_state_is_container and new_state_is_container: # hold some additional references # transitions are removed when changing the state type, thus do not copy them required_model_properties.extend(['states', 'data_flows', 'scoped_variables']) obsolete_model_properties.append('transitions') elif current_state_is_container: obsolete_model_properties.extend(['states', 'transitions', 'data_flows', 'scoped_variables']) def get_element_list(state_m, prop_name): if prop_name == 'income': return [state_m.income] wrapper = getattr(state_m, prop_name) # ._obj is needed as gaphas wraps observable lists and dicts into a gaphas.support.ObsWrapper list_or_dict = wrapper._obj if isinstance(list_or_dict, list): return list_or_dict[:] # copy list return list(list_or_dict.values()) # dict required_child_models = {} for prop_name in required_model_properties: required_child_models[prop_name] = get_element_list(state_m, prop_name) obsolete_child_models = {} for prop_name in obsolete_model_properties: obsolete_child_models[prop_name] = get_element_list(state_m, prop_name) # Special handling of BarrierState, which includes the DeciderState that always becomes obsolete if isinstance(state_m, ContainerStateModel): decider_state_m = state_m.states.get(UNIQUE_DECIDER_STATE_ID, None) if decider_state_m: if new_state_is_container: required_child_models['states'].remove(decider_state_m) obsolete_child_models['states'] = [decider_state_m] return required_child_models, obsolete_child_models
['def', 'extract_child_models_of_state', '(', 'state_m', ',', 'new_state_class', ')', ':', '# check if root state and which type of state', 'assert', 'isinstance', '(', 'state_m', ',', 'StateModel', ')', 'assert', 'issubclass', '(', 'new_state_class', ',', 'State', ')', 'orig_state', '=', 'state_m', '.', 'state', '# only here to get the input parameter of the Core-function', 'current_state_is_container', '=', 'isinstance', '(', 'orig_state', ',', 'ContainerState', ')', 'new_state_is_container', '=', 'issubclass', '(', 'new_state_class', ',', 'ContainerState', ')', '# define which model references to hold for new state', 'required_model_properties', '=', '[', "'input_data_ports'", ',', "'output_data_ports'", ',', "'outcomes'", ',', "'income'", ']', 'obsolete_model_properties', '=', '[', ']', 'if', 'current_state_is_container', 'and', 'new_state_is_container', ':', '# hold some additional references', '# transitions are removed when changing the state type, thus do not copy them', 'required_model_properties', '.', 'extend', '(', '[', "'states'", ',', "'data_flows'", ',', "'scoped_variables'", ']', ')', 'obsolete_model_properties', '.', 'append', '(', "'transitions'", ')', 'elif', 'current_state_is_container', ':', 'obsolete_model_properties', '.', 'extend', '(', '[', "'states'", ',', "'transitions'", ',', "'data_flows'", ',', "'scoped_variables'", ']', ')', 'def', 'get_element_list', '(', 'state_m', ',', 'prop_name', ')', ':', 'if', 'prop_name', '==', "'income'", ':', 'return', '[', 'state_m', '.', 'income', ']', 'wrapper', '=', 'getattr', '(', 'state_m', ',', 'prop_name', ')', '# ._obj is needed as gaphas wraps observable lists and dicts into a gaphas.support.ObsWrapper', 'list_or_dict', '=', 'wrapper', '.', '_obj', 'if', 'isinstance', '(', 'list_or_dict', ',', 'list', ')', ':', 'return', 'list_or_dict', '[', ':', ']', '# copy list', 'return', 'list', '(', 'list_or_dict', '.', 'values', '(', ')', ')', '# dict', 'required_child_models', '=', '{', '}', 'for', 'prop_name', 'in', 'required_model_properties', ':', 'required_child_models', '[', 'prop_name', ']', '=', 'get_element_list', '(', 'state_m', ',', 'prop_name', ')', 'obsolete_child_models', '=', '{', '}', 'for', 'prop_name', 'in', 'obsolete_model_properties', ':', 'obsolete_child_models', '[', 'prop_name', ']', '=', 'get_element_list', '(', 'state_m', ',', 'prop_name', ')', '# Special handling of BarrierState, which includes the DeciderState that always becomes obsolete', 'if', 'isinstance', '(', 'state_m', ',', 'ContainerStateModel', ')', ':', 'decider_state_m', '=', 'state_m', '.', 'states', '.', 'get', '(', 'UNIQUE_DECIDER_STATE_ID', ',', 'None', ')', 'if', 'decider_state_m', ':', 'if', 'new_state_is_container', ':', 'required_child_models', '[', "'states'", ']', '.', 'remove', '(', 'decider_state_m', ')', 'obsolete_child_models', '[', "'states'", ']', '=', '[', 'decider_state_m', ']', 'return', 'required_child_models', ',', 'obsolete_child_models']
Retrieve child models of state model The function extracts the child state and state element models of the given state model into a dict. It only extracts those properties that are required for a state of type `new_state_class`. Transitions are always left out. :param state_m: state model from which children are to be extracted :param new_state_class: The type of the new class :return:
['Retrieve', 'child', 'models', 'of', 'state', 'model']
train
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/helpers/state.py#L233-L286
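A hedged call sketch; `state_m` is an assumed, already-built StateModel, and the HierarchyState import path is assumed from RAFCON's core package layout.
from rafcon.core.states.hierarchy_state import HierarchyState
from rafcon.gui.helpers.state import extract_child_models_of_state

required, obsolete = extract_child_models_of_state(state_m, HierarchyState)
# Ports, outcomes and income are always in `required`; when the current
# state is a container, its transitions always end up in `obsolete`.
print(sorted(required), sorted(obsolete))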
1,057
bigchaindb/bigchaindb
bigchaindb/lib.py
BigchainDB.get_transactions_filtered
def get_transactions_filtered(self, asset_id, operation=None): """Get a list of transactions filtered on some criteria """ txids = backend.query.get_txids_filtered(self.connection, asset_id, operation) for txid in txids: yield self.get_transaction(txid)
python
def get_transactions_filtered(self, asset_id, operation=None): """Get a list of transactions filtered on some criteria """ txids = backend.query.get_txids_filtered(self.connection, asset_id, operation) for txid in txids: yield self.get_transaction(txid)
['def', 'get_transactions_filtered', '(', 'self', ',', 'asset_id', ',', 'operation', '=', 'None', ')', ':', 'txids', '=', 'backend', '.', 'query', '.', 'get_txids_filtered', '(', 'self', '.', 'connection', ',', 'asset_id', ',', 'operation', ')', 'for', 'txid', 'in', 'txids', ':', 'yield', 'self', '.', 'get_transaction', '(', 'txid', ')']
Get a list of transactions filtered on some criteria
['Get', 'a', 'list', 'of', 'transactions', 'filtered', 'on', 'some', 'criteria']
train
https://github.com/bigchaindb/bigchaindb/blob/835fdfcf598918f76139e3b88ee33dd157acaaa7/bigchaindb/lib.py#L260-L266
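The method is a generator, so realize it explicitly; a sketch assuming a configured BigchainDB node, with asset_id as a hypothetical placeholder.
from bigchaindb.lib import BigchainDB

bdb = BigchainDB()  # assumed to pick up the node's configured backend
asset_id = 'replace-with-a-real-asset-id'  # hypothetical placeholder
create_txs = list(bdb.get_transactions_filtered(asset_id, operation='CREATE'))
print(len(create_txs))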
1,058
CivicSpleen/ambry
ambry/bundle/bundle.py
Bundle._ingest_source
def _ingest_source(self, source, ps, force=None): """Ingest a single source""" from ambry.bundle.process import call_interval try: from ambry.orm.exc import NotFoundError if not source.is_partition and source.datafile.exists: if not source.datafile.is_finalized: source.datafile.remove() elif force: source.datafile.remove() else: ps.update( message='Source {} already ingested, skipping'.format(source.name), state='skipped') return True if source.is_partition: # Check if the partition exists try: self.library.partition(source.ref) except NotFoundError: # Maybe it is an internal reference, in which case we can just delay # until the partition is built ps.update(message="Not Ingesting {}: referenced partition '{}' does not exist" .format(source.name, source.ref), state='skipped') return True source.state = source.STATES.INGESTING iterable_source, source_pipe = self.source_pipe(source, ps) if not source.is_ingestible: ps.update(message='Not an ingestible source: {}'.format(source.name), state='skipped', source=source) source.state = source.STATES.NOTINGESTABLE return True ps.update('Ingesting {} from {}'.format(source.spec.name, source.url or source.generator), item_type='rows', item_count=0) @call_interval(5) def ingest_progress_f(i): (desc, n_records, total, rate) = source.datafile.report_progress() ps.update( message='Ingesting {}: rate: {}'.format(source.spec.name, rate), item_count=n_records) source.datafile.load_rows(iterable_source, callback=ingest_progress_f, limit=500 if self.limited_run else None, intuit_type=True, run_stats=False) if source.datafile.meta['warnings']: for w in source.datafile.meta['warnings']: self.error("Ingestion error: {}".format(w)) ps.update(message='Ingested to {}'.format(source.datafile.syspath)) ps.update(message='Updating tables and specs for {}'.format(source.name)) # source.update_table() # Generate the source tables. source.update_spec() # Update header_lines, start_line, etc. if self.limited_run: source.end_line = None # Otherwise, it will be 500 self.build_source_files.sources.objects_to_record() ps.update(message='Ingested {}'.format(source.datafile.path), state='done') source.state = source.STATES.INGESTED self.commit() return True except Exception as e: import traceback from ambry.util import qualified_class_name ps.update( message='Source {} failed with exception: {}'.format(source.name, e), exception_class=qualified_class_name(e), exception_trace=str(traceback.format_exc()), state='error' ) source.state = source.STATES.INGESTING + '_error' self.commit() return False
python
def _ingest_source(self, source, ps, force=None): """Ingest a single source""" from ambry.bundle.process import call_interval try: from ambry.orm.exc import NotFoundError if not source.is_partition and source.datafile.exists: if not source.datafile.is_finalized: source.datafile.remove() elif force: source.datafile.remove() else: ps.update( message='Source {} already ingested, skipping'.format(source.name), state='skipped') return True if source.is_partition: # Check if the partition exists try: self.library.partition(source.ref) except NotFoundError: # Maybe it is an internal reference, in which case we can just delay # until the partition is built ps.update(message="Not Ingesting {}: referenced partition '{}' does not exist" .format(source.name, source.ref), state='skipped') return True source.state = source.STATES.INGESTING iterable_source, source_pipe = self.source_pipe(source, ps) if not source.is_ingestible: ps.update(message='Not an ingestible source: {}'.format(source.name), state='skipped', source=source) source.state = source.STATES.NOTINGESTABLE return True ps.update('Ingesting {} from {}'.format(source.spec.name, source.url or source.generator), item_type='rows', item_count=0) @call_interval(5) def ingest_progress_f(i): (desc, n_records, total, rate) = source.datafile.report_progress() ps.update( message='Ingesting {}: rate: {}'.format(source.spec.name, rate), item_count=n_records) source.datafile.load_rows(iterable_source, callback=ingest_progress_f, limit=500 if self.limited_run else None, intuit_type=True, run_stats=False) if source.datafile.meta['warnings']: for w in source.datafile.meta['warnings']: self.error("Ingestion error: {}".format(w)) ps.update(message='Ingested to {}'.format(source.datafile.syspath)) ps.update(message='Updating tables and specs for {}'.format(source.name)) # source.update_table() # Generate the source tables. source.update_spec() # Update header_lines, start_line, etc. if self.limited_run: source.end_line = None # Otherwise, it will be 500 self.build_source_files.sources.objects_to_record() ps.update(message='Ingested {}'.format(source.datafile.path), state='done') source.state = source.STATES.INGESTED self.commit() return True except Exception as e: import traceback from ambry.util import qualified_class_name ps.update( message='Source {} failed with exception: {}'.format(source.name, e), exception_class=qualified_class_name(e), exception_trace=str(traceback.format_exc()), state='error' ) source.state = source.STATES.INGESTING + '_error' self.commit() return False
['def', '_ingest_source', '(', 'self', ',', 'source', ',', 'ps', ',', 'force', '=', 'None', ')', ':', 'from', 'ambry', '.', 'bundle', '.', 'process', 'import', 'call_interval', 'try', ':', 'from', 'ambry', '.', 'orm', '.', 'exc', 'import', 'NotFoundError', 'if', 'not', 'source', '.', 'is_partition', 'and', 'source', '.', 'datafile', '.', 'exists', ':', 'if', 'not', 'source', '.', 'datafile', '.', 'is_finalized', ':', 'source', '.', 'datafile', '.', 'remove', '(', ')', 'elif', 'force', ':', 'source', '.', 'datafile', '.', 'remove', '(', ')', 'else', ':', 'ps', '.', 'update', '(', 'message', '=', "'Source {} already ingested, skipping'", '.', 'format', '(', 'source', '.', 'name', ')', ',', 'state', '=', "'skipped'", ')', 'return', 'True', 'if', 'source', '.', 'is_partition', ':', '# Check if the partition exists', 'try', ':', 'self', '.', 'library', '.', 'partition', '(', 'source', '.', 'ref', ')', 'except', 'NotFoundError', ':', '# Maybe it is an internal reference, in which case we can just delay', '# until the partition is built', 'ps', '.', 'update', '(', 'message', '=', '"Not Ingesting {}: referenced partition \'{}\' does not exist"', '.', 'format', '(', 'source', '.', 'name', ',', 'source', '.', 'ref', ')', ',', 'state', '=', "'skipped'", ')', 'return', 'True', 'source', '.', 'state', '=', 'source', '.', 'STATES', '.', 'INGESTING', 'iterable_source', ',', 'source_pipe', '=', 'self', '.', 'source_pipe', '(', 'source', ',', 'ps', ')', 'if', 'not', 'source', '.', 'is_ingestible', ':', 'ps', '.', 'update', '(', 'message', '=', "'Not an ingestible source: {}'", '.', 'format', '(', 'source', '.', 'name', ')', ',', 'state', '=', "'skipped'", ',', 'source', '=', 'source', ')', 'source', '.', 'state', '=', 'source', '.', 'STATES', '.', 'NOTINGESTABLE', 'return', 'True', 'ps', '.', 'update', '(', "'Ingesting {} from {}'", '.', 'format', '(', 'source', '.', 'spec', '.', 'name', ',', 'source', '.', 'url', 'or', 'source', '.', 'generator', ')', ',', 'item_type', '=', "'rows'", ',', 'item_count', '=', '0', ')', '@', 'call_interval', '(', '5', ')', 'def', 'ingest_progress_f', '(', 'i', ')', ':', '(', 'desc', ',', 'n_records', ',', 'total', ',', 'rate', ')', '=', 'source', '.', 'datafile', '.', 'report_progress', '(', ')', 'ps', '.', 'update', '(', 'message', '=', "'Ingesting {}: rate: {}'", '.', 'format', '(', 'source', '.', 'spec', '.', 'name', ',', 'rate', ')', ',', 'item_count', '=', 'n_records', ')', 'source', '.', 'datafile', '.', 'load_rows', '(', 'iterable_source', ',', 'callback', '=', 'ingest_progress_f', ',', 'limit', '=', '500', 'if', 'self', '.', 'limited_run', 'else', 'None', ',', 'intuit_type', '=', 'True', ',', 'run_stats', '=', 'False', ')', 'if', 'source', '.', 'datafile', '.', 'meta', '[', "'warnings'", ']', ':', 'for', 'w', 'in', 'source', '.', 'datafile', '.', 'meta', '[', "'warnings'", ']', ':', 'self', '.', 'error', '(', '"Ingestion error: {}"', '.', 'format', '(', 'w', ')', ')', 'ps', '.', 'update', '(', 'message', '=', "'Ingested to {}'", '.', 'format', '(', 'source', '.', 'datafile', '.', 'syspath', ')', ')', 'ps', '.', 'update', '(', 'message', '=', "'Updating tables and specs for {}'", '.', 'format', '(', 'source', '.', 'name', ')', ')', '# source.update_table() # Generate the source tables.', 'source', '.', 'update_spec', '(', ')', '# Update header_lines, start_line, etc.', 'if', 'self', '.', 'limited_run', ':', 'source', '.', 'end_line', '=', 'None', '# Otherwise, it will be 500', 'self', '.', 'build_source_files', '.', 'sources', '.', 'objects_to_record', '(', ')', 'ps', '.', 'update', '(', 'message', '=', "'Ingested {}'", '.', 'format', '(', 'source', '.', 'datafile', '.', 'path', ')', ',', 'state', '=', "'done'", ')', 'source', '.', 'state', '=', 'source', '.', 'STATES', '.', 'INGESTED', 'self', '.', 'commit', '(', ')', 'return', 'True', 'except', 'Exception', 'as', 'e', ':', 'import', 'traceback', 'from', 'ambry', '.', 'util', 'import', 'qualified_class_name', 'ps', '.', 'update', '(', 'message', '=', "'Source {} failed with exception: {}'", '.', 'format', '(', 'source', '.', 'name', ',', 'e', ')', ',', 'exception_class', '=', 'qualified_class_name', '(', 'e', ')', ',', 'exception_trace', '=', 'str', '(', 'traceback', '.', 'format_exc', '(', ')', ')', ',', 'state', '=', "'error'", ')', 'source', '.', 'state', '=', 'source', '.', 'STATES', '.', 'INGESTING', '+', "'_error'", 'self', '.', 'commit', '(', ')', 'return', 'False']
Ingest a single source
['Ingest', 'a', 'single', 'source']
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L1900-L1992
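The progress callback above is throttled with @call_interval(5). A standalone imitation of that throttle pattern follows; ambry's real decorator may differ in details, so treat this as a sketch of the idea.
import time

def call_interval(seconds):
    # Run the wrapped function at most once per `seconds` interval.
    def decorator(fn):
        last_call = [0.0]
        def wrapper(*args, **kwargs):
            now = time.monotonic()
            if now - last_call[0] >= seconds:
                last_call[0] = now
                return fn(*args, **kwargs)
        return wrapper
    return decorator

@call_interval(5)
def report_progress(n_records):
    print('ingested', n_records, 'rows so far')

for i in range(1000):
    report_progress(i)  # prints only once within any 5-second window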
1,059
sdispater/orator
orator/schema/builder.py
SchemaBuilder.drop
def drop(self, table): """ Drop a table from the schema. :param table: The table :type table: str """ blueprint = self._create_blueprint(table) blueprint.drop() self._build(blueprint)
python
def drop(self, table): """ Drop a table from the schema. :param table: The table :type table: str """ blueprint = self._create_blueprint(table) blueprint.drop() self._build(blueprint)
['def', 'drop', '(', 'self', ',', 'table', ')', ':', 'blueprint', '=', 'self', '.', '_create_blueprint', '(', 'table', ')', 'blueprint', '.', 'drop', '(', ')', 'self', '.', '_build', '(', 'blueprint', ')']
Drop a table from the schema. :param table: The table :type table: str
['Drop', 'a', 'table', 'from', 'the', 'schema', '.']
train
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/schema/builder.py#L103-L114
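A usage sketch against an in-memory SQLite database, assuming orator's documented DatabaseManager/Schema pairing.
from orator import DatabaseManager, Schema

db = DatabaseManager({'sqlite': {'driver': 'sqlite', 'database': ':memory:'}})
schema = Schema(db)

with schema.create('users') as table:
    table.increments('id')

schema.drop('users')  # builds a drop blueprint and executes DROP TABLE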
1,060
twisted/twistedchecker
twistedchecker/core/runner.py
Runner.allowPatternsForNameChecking
def allowPatternsForNameChecking(self, patternsFunc, patternsClass): """ Allow name exceptions by given patterns. @param patternsFunc: patterns of special function names @param patternsClass: patterns of special class names """ cfgParser = self.linter.cfgfile_parser nameChecker = self.getCheckerByName(NameChecker) if not nameChecker: return if patternsFunc: regexFuncAdd = "|((%s).+)$" % "|".join(patternsFunc) else: regexFuncAdd = "" if patternsClass: regexClassAdd = "|((%s).+)$" % "|".join(patternsClass) else: regexClassAdd = "" # Modify regex for function, method and class name. regexMethod = cfgParser.get("BASIC", "method-rgx") + regexFuncAdd regexFunction = cfgParser.get("BASIC", "function-rgx") + regexFuncAdd regexClass = cfgParser.get("BASIC", "class-rgx") + regexClassAdd # Save to config parser. cfgParser.set("BASIC", "method-rgx", regexMethod) cfgParser.set("BASIC", "function-rgx", regexFunction) cfgParser.set("BASIC", "class-rgx", regexClass) # Save to name checker. nameChecker.config.method_rgx = re.compile(regexMethod) nameChecker.config.function_rgx = re.compile(regexFunction) nameChecker.config.class_rgx = re.compile(regexClass)
python
def allowPatternsForNameChecking(self, patternsFunc, patternsClass): """ Allow name exceptions by given patterns. @param patternsFunc: patterns of special function names @param patternsClass: patterns of special class names """ cfgParser = self.linter.cfgfile_parser nameChecker = self.getCheckerByName(NameChecker) if not nameChecker: return if patternsFunc: regexFuncAdd = "|((%s).+)$" % "|".join(patternsFunc) else: regexFuncAdd = "" if patternsClass: regexClassAdd = "|((%s).+)$" % "|".join(patternsClass) else: regexClassAdd = "" # Modify regex for function, method and class name. regexMethod = cfgParser.get("BASIC", "method-rgx") + regexFuncAdd regexFunction = cfgParser.get("BASIC", "function-rgx") + regexFuncAdd regexClass = cfgParser.get("BASIC", "class-rgx") + regexClassAdd # Save to config parser. cfgParser.set("BASIC", "method-rgx", regexMethod) cfgParser.set("BASIC", "function-rgx", regexFunction) cfgParser.set("BASIC", "class-rgx", regexClass) # Save to name checker. nameChecker.config.method_rgx = re.compile(regexMethod) nameChecker.config.function_rgx = re.compile(regexFunction) nameChecker.config.class_rgx = re.compile(regexClass)
['def', 'allowPatternsForNameChecking', '(', 'self', ',', 'patternsFunc', ',', 'patternsClass', ')', ':', 'cfgParser', '=', 'self', '.', 'linter', '.', 'cfgfile_parser', 'nameChecker', '=', 'self', '.', 'getCheckerByName', '(', 'NameChecker', ')', 'if', 'not', 'nameChecker', ':', 'return', 'if', 'patternsFunc', ':', 'regexFuncAdd', '=', '"|((%s).+)$"', '%', '"|"', '.', 'join', '(', 'patternsFunc', ')', 'else', ':', 'regexFuncAdd', '=', '""', 'if', 'patternsClass', ':', 'regexClassAdd', '=', '"|((%s).+)$"', '%', '"|"', '.', 'join', '(', 'patternsClass', ')', 'else', ':', 'regexClassAdd', '=', '""', '# Modify regex for function, method and class name.', 'regexMethod', '=', 'cfgParser', '.', 'get', '(', '"BASIC"', ',', '"method-rgx"', ')', '+', 'regexFuncAdd', 'regexFunction', '=', 'cfgParser', '.', 'get', '(', '"BASIC"', ',', '"function-rgx"', ')', '+', 'regexFuncAdd', 'regexClass', '=', 'cfgParser', '.', 'get', '(', '"BASIC"', ',', '"class-rgx"', ')', '+', 'regexClassAdd', '# Save to config parser.', 'cfgParser', '.', 'set', '(', '"BASIC"', ',', '"method-rgx"', ',', 'regexMethod', ')', 'cfgParser', '.', 'set', '(', '"BASIC"', ',', '"function-rgx"', ',', 'regexFunction', ')', 'cfgParser', '.', 'set', '(', '"BASIC"', ',', '"class-rgx"', ',', 'regexClass', ')', '# Save to name checker.', 'nameChecker', '.', 'config', '.', 'method_rgx', '=', 're', '.', 'compile', '(', 'regexMethod', ')', 'nameChecker', '.', 'config', '.', 'function_rgx', '=', 're', '.', 'compile', '(', 'regexFunction', ')', 'nameChecker', '.', 'config', '.', 'class_rgx', '=', 're', '.', 'compile', '(', 'regexClass', ')']
Allow name exceptions by given patterns. @param patternsFunc: patterns of special function names @param patternsClass: patterns of special class names
['Allow', 'name', 'exceptions', 'by', 'given', 'patterns', '.']
train
https://github.com/twisted/twistedchecker/blob/80060e1c07cf5d67d747dbec8ec0e5ee913e8929/twistedchecker/core/runner.py#L205-L235
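What the string surgery above produces, checked standalone: the base pylint name regex is OR'd with a prefix alternation. The base pattern here is a typical pylint default, used purely as an assumption for the demo.
import re

base_rgx = '[a-z_][a-z0-9_]{2,30}$'            # typical pylint function-rgx
patterns_func = ['deferred_', 'render_']
combined = base_rgx + '|((%s).+)$' % '|'.join(patterns_func)

assert re.match(combined, 'render_GET')        # allowed by exception pattern
assert re.match(combined, 'my_function')       # still allowed by base regex
assert not re.match(combined, 'BadName')       # rejected by both branches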
1,061
twisted/txaws
txaws/ec2/client.py
Parser.describe_instances
def describe_instances(self, xml_bytes): """ Parse the reservations XML payload that is returned from an AWS describeInstances API call. Instead of returning the reservations as the "top-most" object, we return the object that most developers and their code will be interested in: the instances. An instance's reservation is available on the instance object. The following instance attributes are optional: * ami_launch_index * key_name * kernel_id * product_codes * ramdisk_id * reason @param xml_bytes: raw XML payload from AWS. """ root = XML(xml_bytes) results = [] # May be a more elegant way to do this: for reservation_data in root.find("reservationSet"): # Create a reservation object with the parsed data. reservation = model.Reservation( reservation_id=reservation_data.findtext("reservationId"), owner_id=reservation_data.findtext("ownerId")) # Get the list of instances. instances = self.instances_set( reservation_data, reservation) results.extend(instances) return results
python
def describe_instances(self, xml_bytes): """ Parse the reservations XML payload that is returned from an AWS describeInstances API call. Instead of returning the reservations as the "top-most" object, we return the object that most developers and their code will be interested in: the instances. An instance's reservation is available on the instance object. The following instance attributes are optional: * ami_launch_index * key_name * kernel_id * product_codes * ramdisk_id * reason @param xml_bytes: raw XML payload from AWS. """ root = XML(xml_bytes) results = [] # May be a more elegant way to do this: for reservation_data in root.find("reservationSet"): # Create a reservation object with the parsed data. reservation = model.Reservation( reservation_id=reservation_data.findtext("reservationId"), owner_id=reservation_data.findtext("ownerId")) # Get the list of instances. instances = self.instances_set( reservation_data, reservation) results.extend(instances) return results
['def', 'describe_instances', '(', 'self', ',', 'xml_bytes', ')', ':', 'root', '=', 'XML', '(', 'xml_bytes', ')', 'results', '=', '[', ']', '# May be a more elegant way to do this:', 'for', 'reservation_data', 'in', 'root', '.', 'find', '(', '"reservationSet"', ')', ':', '# Create a reservation object with the parsed data.', 'reservation', '=', 'model', '.', 'Reservation', '(', 'reservation_id', '=', 'reservation_data', '.', 'findtext', '(', '"reservationId"', ')', ',', 'owner_id', '=', 'reservation_data', '.', 'findtext', '(', '"ownerId"', ')', ')', '# Get the list of instances.', 'instances', '=', 'self', '.', 'instances_set', '(', 'reservation_data', ',', 'reservation', ')', 'results', '.', 'extend', '(', 'instances', ')', 'return', 'results']
Parse the reservations XML payload that is returned from an AWS describeInstances API call. Instead of returning the reservations as the "top-most" object, we return the object that most developers and their code will be interested in: the instances. The reservation is available on each instance object. The following instance attributes are optional: * ami_launch_index * key_name * kernel_id * product_codes * ramdisk_id * reason @param xml_bytes: raw XML payload from AWS.
['Parse', 'the', 'reservations', 'XML', 'payload', 'that', 'is', 'returned', 'from', 'an', 'AWS', 'describeInstances', 'API', 'call', '.']
train
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/ec2/client.py#L626-L658
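A minimal sketch of feeding the parser above; the payload is a stripped-down stand-in for the real DescribeInstances response (which is namespaced in practice), and `parser` is assumed to be a txaws ec2 Parser instance.

xml_bytes = b"""<DescribeInstancesResponse>
  <reservationSet>
    <item>
      <reservationId>r-123456</reservationId>
      <ownerId>111122223333</ownerId>
      <instancesSet></instancesSet>
    </item>
  </reservationSet>
</DescribeInstancesResponse>"""
instances = parser.describe_instances(xml_bytes)  # flat list of instances, not reservations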
1,062
saltstack/salt
salt/grains/napalm.py
host
def host(proxy=None): ''' This grain is set by the NAPALM grain module only when running in a proxy minion. When Salt is installed directly on the network device, thus running a regular minion, the ``host`` grain provides the physical hostname of the network device, as it would be on an ordinary minion server. When running in a proxy minion, ``host`` points to the value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`. .. note:: The difference between ``host`` and ``hostname`` is that ``host`` provides the physical location - either domain name or IP address, while ``hostname`` provides the hostname as configured on the device. They are not necessarily the same. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt 'device*' grains.get host Output: .. code-block:: yaml device1: ip-172-31-13-136.us-east-2.compute.internal device2: ip-172-31-11-193.us-east-2.compute.internal device3: ip-172-31-2-181.us-east-2.compute.internal ''' if proxy and salt.utils.napalm.is_proxy(__opts__): # this grain is set only when running in a proxy minion # otherwise will use the default Salt grains return {'host': _get_device_grain('hostname', proxy=proxy)}
python
def host(proxy=None): ''' This grain is set by the NAPALM grain module only when running in a proxy minion. When Salt is installed directly on the network device, thus running a regular minion, the ``host`` grain provides the physical hostname of the network device, as it would be on an ordinary minion server. When running in a proxy minion, ``host`` points to the value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`. .. note:: The difference between ``host`` and ``hostname`` is that ``host`` provides the physical location - either domain name or IP address, while ``hostname`` provides the hostname as configured on the device. They are not necessarily the same. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt 'device*' grains.get host Output: .. code-block:: yaml device1: ip-172-31-13-136.us-east-2.compute.internal device2: ip-172-31-11-193.us-east-2.compute.internal device3: ip-172-31-2-181.us-east-2.compute.internal ''' if proxy and salt.utils.napalm.is_proxy(__opts__): # this grain is set only when running in a proxy minion # otherwise will use the default Salt grains return {'host': _get_device_grain('hostname', proxy=proxy)}
['def', 'host', '(', 'proxy', '=', 'None', ')', ':', 'if', 'proxy', 'and', 'salt', '.', 'utils', '.', 'napalm', '.', 'is_proxy', '(', '__opts__', ')', ':', '# this grain is set only when running in a proxy minion', '# otherwise will use the default Salt grains', 'return', '{', "'host'", ':', '_get_device_grain', '(', "'hostname'", ',', 'proxy', '=', 'proxy', ')', '}']
This grain is set by the NAPALM grain module only when running in a proxy minion. When Salt is installed directly on the network device, thus running a regular minion, the ``host`` grain provides the physical hostname of the network device, as it would be on an ordinary minion server. When running in a proxy minion, ``host`` points to the value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`. .. note:: The difference between ``host`` and ``hostname`` is that ``host`` provides the physical location - either domain name or IP address, while ``hostname`` provides the hostname as configured on the device. They are not necessarily the same. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt 'device*' grains.get host Output: .. code-block:: yaml device1: ip-172-31-13-136.us-east-2.compute.internal device2: ip-172-31-11-193.us-east-2.compute.internal device3: ip-172-31-2-181.us-east-2.compute.internal
['This', 'grain', 'is', 'set', 'by', 'the', 'NAPALM', 'grain', 'module', 'only', 'when', 'running', 'in', 'a', 'proxy', 'minion', '.', 'When', 'Salt', 'is', 'installed', 'directly', 'on', 'the', 'network', 'device', 'thus', 'running', 'a', 'regular', 'minion', 'the', 'host', 'grain', 'provides', 'the', 'physical', 'hostname', 'of', 'the', 'network', 'device', 'as', 'it', 'would', 'be', 'on', 'an', 'ordinary', 'minion', 'server', '.', 'When', 'running', 'in', 'a', 'proxy', 'minion', 'host', 'points', 'to', 'the', 'value', 'configured', 'in', 'the', 'pillar', ':', ':', 'mod', ':', 'NAPALM', 'proxy', 'module', '<salt', '.', 'proxy', '.', 'napalm', '>', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/napalm.py#L316-L356
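Illustrative only, since the grain needs a running Salt/NAPALM environment: `napalm_proxy_handle` stands in for the proxy-module handle Salt injects, and the return shape follows the docstring above.

grains = host(proxy=napalm_proxy_handle)  # hypothetical handle injected by Salt
# -> {'host': 'ip-172-31-13-136.us-east-2.compute.internal'}
grains = host()  # on a regular minion: returns None, so the default grains apply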
1,063
codebynumbers/ftpretty
ftpretty.py
ftpretty.put
def put(self, local, remote, contents=None, quiet=False): """ Puts a local file (or contents) on to the FTP server local can be: a string: path to input file a file: opened for reading None: contents are pushed """ remote_dir = os.path.dirname(remote) remote_file = os.path.basename(local)\ if remote.endswith('/') else os.path.basename(remote) if contents: # local is ignored if contents is set local_file = buffer_type(contents) elif isinstance(local, file_type): local_file = local else: local_file = open(local, 'rb') current = self.conn.pwd() self.descend(remote_dir, force=True) size = 0 try: self.conn.storbinary('STOR %s' % remote_file, local_file) size = self.conn.size(remote_file) except: if not quiet: raise finally: local_file.close() self.conn.cwd(current) return size
python
def put(self, local, remote, contents=None, quiet=False): """ Puts a local file (or contents) on to the FTP server local can be: a string: path to input file a file: opened for reading None: contents are pushed """ remote_dir = os.path.dirname(remote) remote_file = os.path.basename(local)\ if remote.endswith('/') else os.path.basename(remote) if contents: # local is ignored if contents is set local_file = buffer_type(contents) elif isinstance(local, file_type): local_file = local else: local_file = open(local, 'rb') current = self.conn.pwd() self.descend(remote_dir, force=True) size = 0 try: self.conn.storbinary('STOR %s' % remote_file, local_file) size = self.conn.size(remote_file) except: if not quiet: raise finally: local_file.close() self.conn.cwd(current) return size
['def', 'put', '(', 'self', ',', 'local', ',', 'remote', ',', 'contents', '=', 'None', ',', 'quiet', '=', 'False', ')', ':', 'remote_dir', '=', 'os', '.', 'path', '.', 'dirname', '(', 'remote', ')', 'remote_file', '=', 'os', '.', 'path', '.', 'basename', '(', 'local', ')', 'if', 'remote', '.', 'endswith', '(', "'/'", ')', 'else', 'os', '.', 'path', '.', 'basename', '(', 'remote', ')', 'if', 'contents', ':', '# local is ignored if contents is set', 'local_file', '=', 'buffer_type', '(', 'contents', ')', 'elif', 'isinstance', '(', 'local', ',', 'file_type', ')', ':', 'local_file', '=', 'local', 'else', ':', 'local_file', '=', 'open', '(', 'local', ',', "'rb'", ')', 'current', '=', 'self', '.', 'conn', '.', 'pwd', '(', ')', 'self', '.', 'descend', '(', 'remote_dir', ',', 'force', '=', 'True', ')', 'size', '=', '0', 'try', ':', 'self', '.', 'conn', '.', 'storbinary', '(', "'STOR %s'", '%', 'remote_file', ',', 'local_file', ')', 'size', '=', 'self', '.', 'conn', '.', 'size', '(', 'remote_file', ')', 'except', ':', 'if', 'not', 'quiet', ':', 'raise', 'finally', ':', 'local_file', '.', 'close', '(', ')', 'self', '.', 'conn', '.', 'cwd', '(', 'current', ')', 'return', 'size']
Puts a local file (or contents) on to the FTP server local can be: a string: path to input file a file: opened for reading None: contents are pushed
['Puts', 'a', 'local', 'file', '(', 'or', 'contents', ')', 'on', 'to', 'the', 'FTP', 'server']
train
https://github.com/codebynumbers/ftpretty/blob/5ee6e2cc679199ff52d1cd2ed1b0613f12aa6f67/ftpretty.py#L86-L118
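A short usage sketch, assuming `ftp` is a connected ftpretty session; hostname, credentials and paths are placeholders.

from ftpretty import ftpretty
ftp = ftpretty('ftp.example.com', 'user', 'secret')   # placeholder credentials
ftp.put('report.csv', 'uploads/')                     # remote ends with '/': keeps the local basename
ftp.put(None, 'uploads/hello.txt', contents=b'hi')    # push raw bytes; `local` is ignored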
1,064
carpedm20/fbchat
fbchat/_client.py
Client.fetchGroupInfo
def fetchGroupInfo(self, *group_ids): """ Get groups' info from IDs, unordered :param group_ids: One or more group ID(s) to query :return: :class:`models.Group` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed """ threads = self.fetchThreadInfo(*group_ids) groups = {} for id_, thread in threads.items(): if thread.type == ThreadType.GROUP: groups[id_] = thread else: raise FBchatUserError("Thread {} was not a group".format(thread)) return groups
python
def fetchGroupInfo(self, *group_ids): """ Get groups' info from IDs, unordered :param group_ids: One or more group ID(s) to query :return: :class:`models.Group` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed """ threads = self.fetchThreadInfo(*group_ids) groups = {} for id_, thread in threads.items(): if thread.type == ThreadType.GROUP: groups[id_] = thread else: raise FBchatUserError("Thread {} was not a group".format(thread)) return groups
['def', 'fetchGroupInfo', '(', 'self', ',', '*', 'group_ids', ')', ':', 'threads', '=', 'self', '.', 'fetchThreadInfo', '(', '*', 'group_ids', ')', 'groups', '=', '{', '}', 'for', 'id_', ',', 'thread', 'in', 'threads', '.', 'items', '(', ')', ':', 'if', 'thread', '.', 'type', '==', 'ThreadType', '.', 'GROUP', ':', 'groups', '[', 'id_', ']', '=', 'thread', 'else', ':', 'raise', 'FBchatUserError', '(', '"Thread {} was not a group"', '.', 'format', '(', 'thread', ')', ')', 'return', 'groups']
Get groups' info from IDs, unordered :param group_ids: One or more group ID(s) to query :return: :class:`models.Group` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed
['Get', 'groups', 'info', 'from', 'IDs', 'unordered']
train
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L922-L939
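Sketch of the call, assuming `client` is a logged-in fbchat Client; the IDs are placeholders, and a non-group ID raises FBchatUserError as shown above.

groups = client.fetchGroupInfo('1234567890', '9876543210')  # placeholder thread IDs
for group_id, group in groups.items():
    print(group_id, group)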
1,065
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
BaseCase.add_tour_step
def add_tour_step(self, message, selector=None, name=None, title=None, theme=None, alignment=None, duration=None): """ Allows the user to add tour steps for a website. @Params message - The message to display. selector - The CSS Selector of the Element to attach to. name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. title - Additional header text that appears above the message. theme - (NON-Bootstrap Tours ONLY) The styling of the tour step. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("arrows" is used if None is selected.) alignment - Choose from "top", "bottom", "left", and "right". ("top" is the default alignment). duration - (Bootstrap Tours ONLY) The amount of time, in seconds, before automatically advancing to the next tour step. """ if not selector: selector = "html" if page_utils.is_xpath_selector(selector): selector = self.convert_to_css_selector(selector, By.XPATH) selector = self.__escape_quotes_if_needed(selector) if not name: name = "default" if name not in self._tour_steps: # By default, will create an IntroJS tour if no tours exist self.create_tour(name=name, theme="introjs") if not title: title = "" title = self.__escape_quotes_if_needed(title) if message: message = self.__escape_quotes_if_needed(message) else: message = "" if not alignment or ( alignment not in ["top", "bottom", "left", "right"]): if "Hopscotch" not in self._tour_steps[name][0]: alignment = "top" else: alignment = "bottom" if "Bootstrap" in self._tour_steps[name][0]: self.__add_bootstrap_tour_step( message, selector=selector, name=name, title=title, alignment=alignment, duration=duration) elif "Hopscotch" in self._tour_steps[name][0]: self.__add_hopscotch_tour_step( message, selector=selector, name=name, title=title, alignment=alignment) elif "IntroJS" in self._tour_steps[name][0]: self.__add_introjs_tour_step( message, selector=selector, name=name, title=title, alignment=alignment) else: self.__add_shepherd_tour_step( message, selector=selector, name=name, title=title, theme=theme, alignment=alignment)
python
def add_tour_step(self, message, selector=None, name=None, title=None, theme=None, alignment=None, duration=None): """ Allows the user to add tour steps for a website. @Params message - The message to display. selector - The CSS Selector of the Element to attach to. name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. title - Additional header text that appears above the message. theme - (NON-Bootstrap Tours ONLY) The styling of the tour step. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("arrows" is used if None is selected.) alignment - Choose from "top", "bottom", "left", and "right". ("top" is the default alignment). duration - (Bootstrap Tours ONLY) The amount of time, in seconds, before automatically advancing to the next tour step. """ if not selector: selector = "html" if page_utils.is_xpath_selector(selector): selector = self.convert_to_css_selector(selector, By.XPATH) selector = self.__escape_quotes_if_needed(selector) if not name: name = "default" if name not in self._tour_steps: # By default, will create an IntroJS tour if no tours exist self.create_tour(name=name, theme="introjs") if not title: title = "" title = self.__escape_quotes_if_needed(title) if message: message = self.__escape_quotes_if_needed(message) else: message = "" if not alignment or ( alignment not in ["top", "bottom", "left", "right"]): if "Hopscotch" not in self._tour_steps[name][0]: alignment = "top" else: alignment = "bottom" if "Bootstrap" in self._tour_steps[name][0]: self.__add_bootstrap_tour_step( message, selector=selector, name=name, title=title, alignment=alignment, duration=duration) elif "Hopscotch" in self._tour_steps[name][0]: self.__add_hopscotch_tour_step( message, selector=selector, name=name, title=title, alignment=alignment) elif "IntroJS" in self._tour_steps[name][0]: self.__add_introjs_tour_step( message, selector=selector, name=name, title=title, alignment=alignment) else: self.__add_shepherd_tour_step( message, selector=selector, name=name, title=title, theme=theme, alignment=alignment)
['def', 'add_tour_step', '(', 'self', ',', 'message', ',', 'selector', '=', 'None', ',', 'name', '=', 'None', ',', 'title', '=', 'None', ',', 'theme', '=', 'None', ',', 'alignment', '=', 'None', ',', 'duration', '=', 'None', ')', ':', 'if', 'not', 'selector', ':', 'selector', '=', '"html"', 'if', 'page_utils', '.', 'is_xpath_selector', '(', 'selector', ')', ':', 'selector', '=', 'self', '.', 'convert_to_css_selector', '(', 'selector', ',', 'By', '.', 'XPATH', ')', 'selector', '=', 'self', '.', '__escape_quotes_if_needed', '(', 'selector', ')', 'if', 'not', 'name', ':', 'name', '=', '"default"', 'if', 'name', 'not', 'in', 'self', '.', '_tour_steps', ':', '# By default, will create an IntroJS tour if no tours exist', 'self', '.', 'create_tour', '(', 'name', '=', 'name', ',', 'theme', '=', '"introjs"', ')', 'if', 'not', 'title', ':', 'title', '=', '""', 'title', '=', 'self', '.', '__escape_quotes_if_needed', '(', 'title', ')', 'if', 'message', ':', 'message', '=', 'self', '.', '__escape_quotes_if_needed', '(', 'message', ')', 'else', ':', 'message', '=', '""', 'if', 'not', 'alignment', 'or', '(', 'alignment', 'not', 'in', '[', '"top"', ',', '"bottom"', ',', '"left"', ',', '"right"', ']', ')', ':', 'if', '"Hopscotch"', 'not', 'in', 'self', '.', '_tour_steps', '[', 'name', ']', '[', '0', ']', ':', 'alignment', '=', '"top"', 'else', ':', 'alignment', '=', '"bottom"', 'if', '"Bootstrap"', 'in', 'self', '.', '_tour_steps', '[', 'name', ']', '[', '0', ']', ':', 'self', '.', '__add_bootstrap_tour_step', '(', 'message', ',', 'selector', '=', 'selector', ',', 'name', '=', 'name', ',', 'title', '=', 'title', ',', 'alignment', '=', 'alignment', ',', 'duration', '=', 'duration', ')', 'elif', '"Hopscotch"', 'in', 'self', '.', '_tour_steps', '[', 'name', ']', '[', '0', ']', ':', 'self', '.', '__add_hopscotch_tour_step', '(', 'message', ',', 'selector', '=', 'selector', ',', 'name', '=', 'name', ',', 'title', '=', 'title', ',', 'alignment', '=', 'alignment', ')', 'elif', '"IntroJS"', 'in', 'self', '.', '_tour_steps', '[', 'name', ']', '[', '0', ']', ':', 'self', '.', '__add_introjs_tour_step', '(', 'message', ',', 'selector', '=', 'selector', ',', 'name', '=', 'name', ',', 'title', '=', 'title', ',', 'alignment', '=', 'alignment', ')', 'else', ':', 'self', '.', '__add_shepherd_tour_step', '(', 'message', ',', 'selector', '=', 'selector', ',', 'name', '=', 'name', ',', 'title', '=', 'title', ',', 'theme', '=', 'theme', ',', 'alignment', '=', 'alignment', ')']
Allows the user to add tour steps for a website. @Params message - The message to display. selector - The CSS Selector of the Element to attach to. name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. title - Additional header text that appears above the message. theme - (NON-Bootstrap Tours ONLY) The styling of the tour step. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("arrows" is used if None is selected.) alignment - Choose from "top", "bottom", "left", and "right". ("top" is the default alignment). duration - (Bootstrap Tours ONLY) The amount of time, in seconds, before automatically advancing to the next tour step.
['Allows', 'the', 'user', 'to', 'add', 'tour', 'steps', 'for', 'a', 'website', '.']
train
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L1028-L1088
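A minimal tour inside a SeleniumBase test method (`self` is a BaseCase); the URL and selector are placeholders.

self.open("https://example.com")                       # placeholder URL
self.add_tour_step("Welcome!", title="Intro")          # no selector -> attaches to "html"
self.add_tour_step("Now click here.", "#submit", alignment="bottom")
self.play_tour()                                       # runs the "default" tour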
1,066
dslackw/slpkg
slpkg/tracking.py
TrackingDeps.run
def run(self): """Run tracking dependencies """ self.msg.resolving() self.repositories() if self.find_pkg: self.dependencies_list.reverse() self.requires = Utils().dimensional_list(self.dependencies_list) self.dependencies = Utils().remove_dbs(self.requires) if self.dependencies == []: self.dependencies = ["No dependencies"] if "--graph=" in self.flag: self.deps_tree() self.msg.done() pkg_len = len(self.name) + 24 print("") # new line at start self.msg.template(pkg_len) print("| Package {0}{1}{2} dependencies :".format( self.cyan, self.name, self.endc)) self.msg.template(pkg_len) print("\\") print(" +---{0}[ Tree of dependencies ]{1}".format(self.yellow, self.endc)) index = 0 for pkg in self.dependencies: if "--check-deps" in self.flag: used = self.check_used(pkg) self.deps_used(pkg, used) used = "{0} {1}{2}{3}".format( "is dependency -->", self.cyan, ", ".join(used), self.endc) else: used = "" index += 1 installed = "" if find_package(pkg + self.meta.sp, self.meta.pkg_path): if self.meta.use_colors in ["off", "OFF"]: installed = "* " print(" |") print(" {0}{1}: {2}{3}{4} {5}{6}".format( "+--", index, self.green, pkg, self.endc, installed, used)) else: print(" |") print(" {0}{1}: {2}{3}{4} {5}".format( "+--", index, self.red, pkg, self.endc, installed)) if self.meta.use_colors in ["off", "OFF"]: print("\n * = Installed\n") else: print("") # new line at end if "--graph=" in self.flag: self.graph() else: self.msg.done() print("\nNo package was found to match\n") raise SystemExit(1)
python
def run(self): """Run tracking dependencies """ self.msg.resolving() self.repositories() if self.find_pkg: self.dependencies_list.reverse() self.requires = Utils().dimensional_list(self.dependencies_list) self.dependencies = Utils().remove_dbs(self.requires) if self.dependencies == []: self.dependencies = ["No dependencies"] if "--graph=" in self.flag: self.deps_tree() self.msg.done() pkg_len = len(self.name) + 24 print("") # new line at start self.msg.template(pkg_len) print("| Package {0}{1}{2} dependencies :".format( self.cyan, self.name, self.endc)) self.msg.template(pkg_len) print("\\") print(" +---{0}[ Tree of dependencies ]{1}".format(self.yellow, self.endc)) index = 0 for pkg in self.dependencies: if "--check-deps" in self.flag: used = self.check_used(pkg) self.deps_used(pkg, used) used = "{0} {1}{2}{3}".format( "is dependency -->", self.cyan, ", ".join(used), self.endc) else: used = "" index += 1 installed = "" if find_package(pkg + self.meta.sp, self.meta.pkg_path): if self.meta.use_colors in ["off", "OFF"]: installed = "* " print(" |") print(" {0}{1}: {2}{3}{4} {5}{6}".format( "+--", index, self.green, pkg, self.endc, installed, used)) else: print(" |") print(" {0}{1}: {2}{3}{4} {5}".format( "+--", index, self.red, pkg, self.endc, installed)) if self.meta.use_colors in ["off", "OFF"]: print("\n * = Installed\n") else: print("") # new line at end if "--graph=" in self.flag: self.graph() else: self.msg.done() print("\nNo package was found to match\n") raise SystemExit(1)
['def', 'run', '(', 'self', ')', ':', 'self', '.', 'msg', '.', 'resolving', '(', ')', 'self', '.', 'repositories', '(', ')', 'if', 'self', '.', 'find_pkg', ':', 'self', '.', 'dependencies_list', '.', 'reverse', '(', ')', 'self', '.', 'requires', '=', 'Utils', '(', ')', '.', 'dimensional_list', '(', 'self', '.', 'dependencies_list', ')', 'self', '.', 'dependencies', '=', 'Utils', '(', ')', '.', 'remove_dbs', '(', 'self', '.', 'requires', ')', 'if', 'self', '.', 'dependencies', '==', '[', ']', ':', 'self', '.', 'dependencies', '=', '[', '"No dependencies"', ']', 'if', '"--graph="', 'in', 'self', '.', 'flag', ':', 'self', '.', 'deps_tree', '(', ')', 'self', '.', 'msg', '.', 'done', '(', ')', 'pkg_len', '=', 'len', '(', 'self', '.', 'name', ')', '+', '24', 'print', '(', '""', ')', '# new line at start', 'self', '.', 'msg', '.', 'template', '(', 'pkg_len', ')', 'print', '(', '"| Package {0}{1}{2} dependencies :"', '.', 'format', '(', 'self', '.', 'cyan', ',', 'self', '.', 'name', ',', 'self', '.', 'endc', ')', ')', 'self', '.', 'msg', '.', 'template', '(', 'pkg_len', ')', 'print', '(', '"\\\\"', ')', 'print', '(', '" +---{0}[ Tree of dependencies ]{1}"', '.', 'format', '(', 'self', '.', 'yellow', ',', 'self', '.', 'endc', ')', ')', 'index', '=', '0', 'for', 'pkg', 'in', 'self', '.', 'dependencies', ':', 'if', '"--check-deps"', 'in', 'self', '.', 'flag', ':', 'used', '=', 'self', '.', 'check_used', '(', 'pkg', ')', 'self', '.', 'deps_used', '(', 'pkg', ',', 'used', ')', 'used', '=', '"{0} {1}{2}{3}"', '.', 'format', '(', '"is dependency -->"', ',', 'self', '.', 'cyan', ',', '", "', '.', 'join', '(', 'used', ')', ',', 'self', '.', 'endc', ')', 'else', ':', 'used', '=', '""', 'index', '+=', '1', 'installed', '=', '""', 'if', 'find_package', '(', 'pkg', '+', 'self', '.', 'meta', '.', 'sp', ',', 'self', '.', 'meta', '.', 'pkg_path', ')', ':', 'if', 'self', '.', 'meta', '.', 'use_colors', 'in', '[', '"off"', ',', '"OFF"', ']', ':', 'installed', '=', '"* "', 'print', '(', '" |"', ')', 'print', '(', '" {0}{1}: {2}{3}{4} {5}{6}"', '.', 'format', '(', '"+--"', ',', 'index', ',', 'self', '.', 'green', ',', 'pkg', ',', 'self', '.', 'endc', ',', 'installed', ',', 'used', ')', ')', 'else', ':', 'print', '(', '" |"', ')', 'print', '(', '" {0}{1}: {2}{3}{4} {5}"', '.', 'format', '(', '"+--"', ',', 'index', ',', 'self', '.', 'red', ',', 'pkg', ',', 'self', '.', 'endc', ',', 'installed', ')', ')', 'if', 'self', '.', 'meta', '.', 'use_colors', 'in', '[', '"off"', ',', '"OFF"', ']', ':', 'print', '(', '"\\n * = Installed\\n"', ')', 'else', ':', 'print', '(', '""', ')', '# new line at end', 'if', '"--graph="', 'in', 'self', '.', 'flag', ':', 'self', '.', 'graph', '(', ')', 'else', ':', 'self', '.', 'msg', '.', 'done', '(', ')', 'print', '(', '"\\nNo package was found to match\\n"', ')', 'raise', 'SystemExit', '(', '1', ')']
Run tracking dependencies
['Run', 'tracking', 'dependencies']
train
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/tracking.py#L67-L123
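Hypothetical driver; the TrackingDeps constructor signature is not shown in this record, so the arguments below are guesses at a package name plus a flag list.

tracker = TrackingDeps("ffmpeg", flag=[])   # hypothetical: package name + CLI flags
tracker.run()   # prints the dependency tree, or raises SystemExit(1) if nothing matched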
1,067
jason-weirather/py-seq-tools
seqtools/format/sam/__init__.py
SAM.original_query_sequence_length
def original_query_sequence_length(self): """Similar to query_sequence_length, but it also includes hard clipped bases if there is no cigar, then default to trying the sequence :return: the length of the query before any clipping :rtype: int """ if not self.is_aligned() or not self.entries.cigar: return self.query_sequence_length # take the naive approach # we are here with something aligned so take more intelligent cigar approach return sum([x[0] for x in self.cigar_array if re.match('[HMIS=X]',x[1])])
python
def original_query_sequence_length(self): """Similar to query_sequence_length, but it also includes hard clipped bases if there is no cigar, then default to trying the sequence :return: the length of the query before any clipping :rtype: int """ if not self.is_aligned() or not self.entries.cigar: return self.query_sequence_length # take the naive approach # we are here with something aligned so take more intelligent cigar approach return sum([x[0] for x in self.cigar_array if re.match('[HMIS=X]',x[1])])
['def', 'original_query_sequence_length', '(', 'self', ')', ':', 'if', 'not', 'self', '.', 'is_aligned', '(', ')', 'or', 'not', 'self', '.', 'entries', '.', 'cigar', ':', 'return', 'self', '.', 'query_sequence_length', '# take the naive approach', '# we are here with something aligned so take more intelligent cigar approach', 'return', 'sum', '(', '[', 'x', '[', '0', ']', 'for', 'x', 'in', 'self', '.', 'cigar_array', 'if', 're', '.', 'match', '(', "'[HMIS=X]'", ',', 'x', '[', '1', ']', ')', ']', ')']
Similar to query_sequence_length, but it also includes hard clipped bases if there is no cigar, then default to trying the sequence :return: the length of the query before any clipping :rtype: int
['Similar', 'to', 'query_sequence_length', 'but', 'it', 'also', 'includes', 'hard', 'clipped', 'bases', 'if', 'there', 'is', 'no', 'cigar', 'then', 'default', 'to', 'trying', 'the', 'sequence']
train
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/format/sam/__init__.py#L195-L206
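The CIGAR arithmetic behind the property, reproduced standalone: hard/soft clips (H/S), matches (M/=/X) and insertions (I) count toward the pre-clipping query length, while deletions (D) do not.

import re

# (length, operation) pairs as stored in a cigar_array
cigar_array = [(5, 'H'), (10, 'S'), (80, 'M'), (2, 'D'), (5, 'S')]
original_len = sum(n for n, op in cigar_array if re.match('[HMIS=X]', op))
print(original_len)  # 100: 5H + 10S + 80M + 5S (the 2D consumes reference only)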
1,068
artefactual-labs/mets-reader-writer
metsrw/fsentry.py
FSEntry.dir
def dir(cls, label, children): """Return ``FSEntry`` directory object.""" return FSEntry(label=label, children=children, type=u"Directory", use=None)
python
def dir(cls, label, children): """Return ``FSEntry`` directory object.""" return FSEntry(label=label, children=children, type=u"Directory", use=None)
['def', 'dir', '(', 'cls', ',', 'label', ',', 'children', ')', ':', 'return', 'FSEntry', '(', 'label', '=', 'label', ',', 'children', '=', 'children', ',', 'type', '=', 'u"Directory"', ',', 'use', '=', 'None', ')']
Return ``FSEntry`` directory object.
['Return', 'FSEntry', 'directory', 'object', '.']
train
https://github.com/artefactual-labs/mets-reader-writer/blob/d95939cabdfdc25cb1bf67df0c84bd0d6e6a73ff/metsrw/fsentry.py#L170-L172
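Usage sketch for the classmethod above; the child FSEntry arguments are illustrative, following metsrw's file-entry conventions.

from metsrw import FSEntry

child = FSEntry(path="objects/photo.tif", use="original")   # illustrative file entry
objects_dir = FSEntry.dir("objects", [child])               # type=u"Directory", use=None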
1,069
tanghaibao/jcvi
jcvi/assembly/hic.py
CLMFile.evaluate_tour_P
def evaluate_tour_P(self, tour): """ Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs. """ from .chic import score_evaluate_P return score_evaluate_P(tour, self.active_sizes, self.P)
python
def evaluate_tour_P(self, tour): """ Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs. """ from .chic import score_evaluate_P return score_evaluate_P(tour, self.active_sizes, self.P)
['def', 'evaluate_tour_P', '(', 'self', ',', 'tour', ')', ':', 'from', '.', 'chic', 'import', 'score_evaluate_P', 'return', 'score_evaluate_P', '(', 'tour', ',', 'self', '.', 'active_sizes', ',', 'self', '.', 'P', ')']
Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs.
['Use', 'Cythonized', 'version', 'to', 'evaluate', 'the', 'score', 'of', 'a', 'current', 'tour', 'with', 'better', 'precision', 'on', 'the', 'distance', 'of', 'the', 'contigs', '.']
train
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/hic.py#L268-L273
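Hedged sketch, assuming `clm` is a loaded CLMFile: a tour is an ordering of the active contigs, and the Cython kernel scores it against the contact matrix P.

tour = list(range(len(clm.active_sizes)))   # identity ordering of the active contigs
score = clm.evaluate_tour_P(tour)           # score as returned by the Cythonized evaluator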
1,070
materialsproject/pymatgen
pymatgen/io/fiesta.py
FiestaInput.from_file
def from_file(cls, filename): """ Read a Fiesta input from a file. Currently tested to work with files generated from this class itself. Args: filename: Filename to parse. Returns: FiestaInput object """ with zopen(filename) as f: return cls.from_string(f.read())
python
def from_file(cls, filename): """ Read a Fiesta input from a file. Currently tested to work with files generated from this class itself. Args: filename: Filename to parse. Returns: FiestaInput object """ with zopen(filename) as f: return cls.from_string(f.read())
['def', 'from_file', '(', 'cls', ',', 'filename', ')', ':', 'with', 'zopen', '(', 'filename', ')', 'as', 'f', ':', 'return', 'cls', '.', 'from_string', '(', 'f', '.', 'read', '(', ')', ')']
Read a Fiesta input from a file. Currently tested to work with files generated from this class itself. Args: filename: Filename to parse. Returns: FiestaInput object
['Read', 'a', 'Fiesta', 'input', 'from', 'a', 'file', '.', 'Currently', 'tested', 'to', 'work', 'with', 'files', 'generated', 'from', 'this', 'class', 'itself', '.']
train
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/fiesta.py#L672-L684
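Minimal usage sketch; the filename is a placeholder, and since the method goes through zopen it also works on gzipped files.

from pymatgen.io.fiesta import FiestaInput

fiesta_input = FiestaInput.from_file("cell.in")   # placeholder Fiesta input file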
1,071
mitsei/dlkit
dlkit/json_/repository/sessions.py
AssetAdminSession.update_asset_content
def update_asset_content(self, asset_content_form): """Updates an existing asset content. arg: asset_content_form (osid.repository.AssetContentForm): the form containing the elements to be updated raise: IllegalState - ``asset_content_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``asset_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``asset_content_form`` did not originate from ``get_asset_content_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetAdminSession.update_asset_content_template from dlkit.abstract_osid.repository.objects import AssetContentForm as ABCAssetContentForm collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) if not isinstance(asset_content_form, ABCAssetContentForm): raise errors.InvalidArgument('argument type is not an AssetContentForm') if not asset_content_form.is_for_update(): raise errors.InvalidArgument('the AssetContentForm is for update only, not create') try: if self._forms[asset_content_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('asset_content_form already used in an update transaction') except KeyError: raise errors.Unsupported('asset_content_form did not originate from this session') if not asset_content_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') asset_id = Id(asset_content_form._my_map['assetId']).get_identifier() asset = collection.find_one( {'$and': [{'_id': ObjectId(asset_id)}, {'assigned' + self._catalog_name + 'Ids': {'$in': [str(self._catalog_id)]}}]}) index = 0 found = False for i in asset['assetContents']: if i['_id'] == ObjectId(asset_content_form._my_map['_id']): asset['assetContents'].pop(index) asset['assetContents'].insert(index, asset_content_form._my_map) found = True break index += 1 if not found: raise errors.NotFound() try: collection.save(asset) except: # what exceptions does mongodb save raise? raise errors.OperationFailed() self._forms[asset_content_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: from .objects import AssetContent return AssetContent( osid_object_map=asset_content_form._my_map, runtime=self._runtime, proxy=self._proxy)
python
def update_asset_content(self, asset_content_form): """Updates an existing asset content. arg: asset_content_form (osid.repository.AssetContentForm): the form containing the elements to be updated raise: IllegalState - ``asset_content_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``asset_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``asset_content_form`` did not originate from ``get_asset_content_form_for_update()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetAdminSession.update_asset_content_template from dlkit.abstract_osid.repository.objects import AssetContentForm as ABCAssetContentForm collection = JSONClientValidated('repository', collection='Asset', runtime=self._runtime) if not isinstance(asset_content_form, ABCAssetContentForm): raise errors.InvalidArgument('argument type is not an AssetContentForm') if not asset_content_form.is_for_update(): raise errors.InvalidArgument('the AssetContentForm is for update only, not create') try: if self._forms[asset_content_form.get_id().get_identifier()] == UPDATED: raise errors.IllegalState('asset_content_form already used in an update transaction') except KeyError: raise errors.Unsupported('asset_content_form did not originate from this session') if not asset_content_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') asset_id = Id(asset_content_form._my_map['assetId']).get_identifier() asset = collection.find_one( {'$and': [{'_id': ObjectId(asset_id)}, {'assigned' + self._catalog_name + 'Ids': {'$in': [str(self._catalog_id)]}}]}) index = 0 found = False for i in asset['assetContents']: if i['_id'] == ObjectId(asset_content_form._my_map['_id']): asset['assetContents'].pop(index) asset['assetContents'].insert(index, asset_content_form._my_map) found = True break index += 1 if not found: raise errors.NotFound() try: collection.save(asset) except: # what exceptions does mongodb save raise? raise errors.OperationFailed() self._forms[asset_content_form.get_id().get_identifier()] = UPDATED # Note: this is out of spec. The OSIDs don't require an object to be returned: from .objects import AssetContent return AssetContent( osid_object_map=asset_content_form._my_map, runtime=self._runtime, proxy=self._proxy)
['def', 'update_asset_content', '(', 'self', ',', 'asset_content_form', ')', ':', '# Implemented from template for', '# osid.repository.AssetAdminSession.update_asset_content_template', 'from', 'dlkit', '.', 'abstract_osid', '.', 'repository', '.', 'objects', 'import', 'AssetContentForm', 'as', 'ABCAssetContentForm', 'collection', '=', 'JSONClientValidated', '(', "'repository'", ',', 'collection', '=', "'Asset'", ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'if', 'not', 'isinstance', '(', 'asset_content_form', ',', 'ABCAssetContentForm', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', "'argument type is not an AssetContentForm'", ')', 'if', 'not', 'asset_content_form', '.', 'is_for_update', '(', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', "'the AssetContentForm is for update only, not create'", ')', 'try', ':', 'if', 'self', '.', '_forms', '[', 'asset_content_form', '.', 'get_id', '(', ')', '.', 'get_identifier', '(', ')', ']', '==', 'UPDATED', ':', 'raise', 'errors', '.', 'IllegalState', '(', "'asset_content_form already used in an update transaction'", ')', 'except', 'KeyError', ':', 'raise', 'errors', '.', 'Unsupported', '(', "'asset_content_form did not originate from this session'", ')', 'if', 'not', 'asset_content_form', '.', 'is_valid', '(', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', "'one or more of the form elements is invalid'", ')', 'asset_id', '=', 'Id', '(', 'asset_content_form', '.', '_my_map', '[', "'assetId'", ']', ')', '.', 'get_identifier', '(', ')', 'asset', '=', 'collection', '.', 'find_one', '(', '{', "'$and'", ':', '[', '{', "'_id'", ':', 'ObjectId', '(', 'asset_id', ')', '}', ',', '{', "'assigned'", '+', 'self', '.', '_catalog_name', '+', "'Ids'", ':', '{', "'$in'", ':', '[', 'str', '(', 'self', '.', '_catalog_id', ')', ']', '}', '}', ']', '}', ')', 'index', '=', '0', 'found', '=', 'False', 'for', 'i', 'in', 'asset', '[', "'assetContents'", ']', ':', 'if', 'i', '[', "'_id'", ']', '==', 'ObjectId', '(', 'asset_content_form', '.', '_my_map', '[', "'_id'", ']', ')', ':', 'asset', '[', "'assetContents'", ']', '.', 'pop', '(', 'index', ')', 'asset', '[', "'assetContents'", ']', '.', 'insert', '(', 'index', ',', 'asset_content_form', '.', '_my_map', ')', 'found', '=', 'True', 'break', 'index', '+=', '1', 'if', 'not', 'found', ':', 'raise', 'errors', '.', 'NotFound', '(', ')', 'try', ':', 'collection', '.', 'save', '(', 'asset', ')', 'except', ':', '# what exceptions does mongodb save raise?', 'raise', 'errors', '.', 'OperationFailed', '(', ')', 'self', '.', '_forms', '[', 'asset_content_form', '.', 'get_id', '(', ')', '.', 'get_identifier', '(', ')', ']', '=', 'UPDATED', "# Note: this is out of spec. The OSIDs don't require an object to be returned:", 'from', '.', 'objects', 'import', 'AssetContent', 'return', 'AssetContent', '(', 'osid_object_map', '=', 'asset_content_form', '.', '_my_map', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'proxy', '=', 'self', '.', '_proxy', ')']
Updates an existing asset content. arg: asset_content_form (osid.repository.AssetContentForm): the form containing the elements to be updated raise: IllegalState - ``asset_content_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``asset_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``asset_content_form`` did not originate from ``get_asset_content_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
['Updates', 'an', 'existing', 'asset', 'content', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/sessions.py#L1749-L1808
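Hedged sketch of the intended update round-trip: `session` is assumed to be an AssetAdminSession, the Id is a placeholder, and the mutator call is illustrative since the form's setters are not shown in this record.

form = session.get_asset_content_form_for_update(asset_content_id)  # placeholder Id
form.set_url('https://example.com/data.pdf')          # illustrative mutator
updated_content = session.update_asset_content(form)  # returns an AssetContent (out of spec)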
1,072
deepmind/pysc2
pysc2/lib/renderer_human.py
RendererHuman.draw_build_target
def draw_build_target(self, surf): """Draw the build target.""" round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v) queued_action = self._queued_action if queued_action: radius = queued_action.footprint_radius if radius: pos = self.get_mouse_pos() if pos: pos = point.Point(round_half(pos.world_pos.x, (radius * 2) % 2), round_half(pos.world_pos.y, (radius * 2) % 2)) surf.draw_circle( colors.PLAYER_ABSOLUTE_PALETTE[ self._obs.observation.player_common.player_id], pos, radius)
python
def draw_build_target(self, surf): """Draw the build target.""" round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v) queued_action = self._queued_action if queued_action: radius = queued_action.footprint_radius if radius: pos = self.get_mouse_pos() if pos: pos = point.Point(round_half(pos.world_pos.x, (radius * 2) % 2), round_half(pos.world_pos.y, (radius * 2) % 2)) surf.draw_circle( colors.PLAYER_ABSOLUTE_PALETTE[ self._obs.observation.player_common.player_id], pos, radius)
['def', 'draw_build_target', '(', 'self', ',', 'surf', ')', ':', 'round_half', '=', 'lambda', 'v', ',', 'cond', ':', 'round', '(', 'v', '-', '0.5', ')', '+', '0.5', 'if', 'cond', 'else', 'round', '(', 'v', ')', 'queued_action', '=', 'self', '.', '_queued_action', 'if', 'queued_action', ':', 'radius', '=', 'queued_action', '.', 'footprint_radius', 'if', 'radius', ':', 'pos', '=', 'self', '.', 'get_mouse_pos', '(', ')', 'if', 'pos', ':', 'pos', '=', 'point', '.', 'Point', '(', 'round_half', '(', 'pos', '.', 'world_pos', '.', 'x', ',', '(', 'radius', '*', '2', ')', '%', '2', ')', ',', 'round_half', '(', 'pos', '.', 'world_pos', '.', 'y', ',', '(', 'radius', '*', '2', ')', '%', '2', ')', ')', 'surf', '.', 'draw_circle', '(', 'colors', '.', 'PLAYER_ABSOLUTE_PALETTE', '[', 'self', '.', '_obs', '.', 'observation', '.', 'player_common', '.', 'player_id', ']', ',', 'pos', ',', 'radius', ')']
Draw the build target.
['Draw', 'the', 'build', 'target', '.']
train
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/renderer_human.py#L1013-L1028
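The snapping rule from `round_half` in isolation: buildings with an even footprint diameter snap to whole world coordinates, while odd diameters snap to half coordinates.

round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v)
print(round_half(10.3, (3 * 2) % 2))    # radius 3.0, diameter 6 (even) -> 10
print(round_half(10.3, (2.5 * 2) % 2))  # radius 2.5, diameter 5 (odd)  -> 10.5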
1,073
mwouts/jupytext
jupytext/metadata_filter.py
update_metadata_filters
def update_metadata_filters(metadata, jupyter_md, cell_metadata): """Update or set the notebook and cell metadata filters""" cell_metadata = [m for m in cell_metadata if m not in ['language', 'magic_args']] if 'cell_metadata_filter' in metadata.get('jupytext', {}): metadata_filter = metadata_filter_as_dict(metadata.get('jupytext', {})['cell_metadata_filter']) if isinstance(metadata_filter.get('excluded'), list): metadata_filter['excluded'] = [key for key in metadata_filter['excluded'] if key not in cell_metadata] metadata_filter.setdefault('additional', []) if isinstance(metadata_filter.get('additional'), list): for key in cell_metadata: if key not in metadata_filter['additional']: metadata_filter['additional'].append(key) metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(metadata_filter) if not jupyter_md: # Set a metadata filter equal to the current metadata in script cell_metadata = {'additional': cell_metadata, 'excluded': 'all'} metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = '-all' metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(cell_metadata)
python
def update_metadata_filters(metadata, jupyter_md, cell_metadata): """Update or set the notebook and cell metadata filters""" cell_metadata = [m for m in cell_metadata if m not in ['language', 'magic_args']] if 'cell_metadata_filter' in metadata.get('jupytext', {}): metadata_filter = metadata_filter_as_dict(metadata.get('jupytext', {})['cell_metadata_filter']) if isinstance(metadata_filter.get('excluded'), list): metadata_filter['excluded'] = [key for key in metadata_filter['excluded'] if key not in cell_metadata] metadata_filter.setdefault('additional', []) if isinstance(metadata_filter.get('additional'), list): for key in cell_metadata: if key not in metadata_filter['additional']: metadata_filter['additional'].append(key) metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(metadata_filter) if not jupyter_md: # Set a metadata filter equal to the current metadata in script cell_metadata = {'additional': cell_metadata, 'excluded': 'all'} metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = '-all' metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(cell_metadata)
['def', 'update_metadata_filters', '(', 'metadata', ',', 'jupyter_md', ',', 'cell_metadata', ')', ':', 'cell_metadata', '=', '[', 'm', 'for', 'm', 'in', 'cell_metadata', 'if', 'm', 'not', 'in', '[', "'language'", ',', "'magic_args'", ']', ']', 'if', "'cell_metadata_filter'", 'in', 'metadata', '.', 'get', '(', "'jupytext'", ',', '{', '}', ')', ':', 'metadata_filter', '=', 'metadata_filter_as_dict', '(', 'metadata', '.', 'get', '(', "'jupytext'", ',', '{', '}', ')', '[', "'cell_metadata_filter'", ']', ')', 'if', 'isinstance', '(', 'metadata_filter', '.', 'get', '(', "'excluded'", ')', ',', 'list', ')', ':', 'metadata_filter', '[', "'excluded'", ']', '=', '[', 'key', 'for', 'key', 'in', 'metadata_filter', '[', "'excluded'", ']', 'if', 'key', 'not', 'in', 'cell_metadata', ']', 'metadata_filter', '.', 'setdefault', '(', "'additional'", ',', '[', ']', ')', 'if', 'isinstance', '(', 'metadata_filter', '.', 'get', '(', "'additional'", ')', ',', 'list', ')', ':', 'for', 'key', 'in', 'cell_metadata', ':', 'if', 'key', 'not', 'in', 'metadata_filter', '[', "'additional'", ']', ':', 'metadata_filter', '[', "'additional'", ']', '.', 'append', '(', 'key', ')', 'metadata', '.', 'setdefault', '(', "'jupytext'", ',', '{', '}', ')', '[', "'cell_metadata_filter'", ']', '=', 'metadata_filter_as_string', '(', 'metadata_filter', ')', 'if', 'not', 'jupyter_md', ':', '# Set a metadata filter equal to the current metadata in script', 'cell_metadata', '=', '{', "'additional'", ':', 'cell_metadata', ',', "'excluded'", ':', "'all'", '}', 'metadata', '.', 'setdefault', '(', "'jupytext'", ',', '{', '}', ')', '[', "'notebook_metadata_filter'", ']', '=', "'-all'", 'metadata', '.', 'setdefault', '(', "'jupytext'", ',', '{', '}', ')', '[', "'cell_metadata_filter'", ']', '=', 'metadata_filter_as_string', '(', 'cell_metadata', ')']
Update or set the notebook and cell metadata filters
['Update', 'or', 'set', 'the', 'notebook', 'and', 'cell', 'metadata', 'filters']
train
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/metadata_filter.py#L66-L86
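A small sketch of the filter update (assuming the function is importable from jupytext.metadata_filter as above); the exact serialized string depends on metadata_filter_as_string.

from jupytext.metadata_filter import update_metadata_filters

metadata = {'jupytext': {'cell_metadata_filter': '-all'}}
update_metadata_filters(metadata, jupyter_md=True, cell_metadata=['tags', 'name'])
print(metadata['jupytext']['cell_metadata_filter'])  # now lists tags/name as additional keys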
1,074
thumbor/thumbor
thumbor/filters/frame.py
Filter.handle_padding
def handle_padding(self, padding): '''Pads the image with transparent pixels if necessary.''' left = padding[0] top = padding[1] right = padding[2] bottom = padding[3] offset_x = 0 offset_y = 0 new_width = self.engine.size[0] new_height = self.engine.size[1] if left > 0: offset_x = left new_width += left if top > 0: offset_y = top new_height += top if right > 0: new_width += right if bottom > 0: new_height += bottom new_engine = self.context.modules.engine.__class__(self.context) new_engine.image = new_engine.gen_image((new_width, new_height), '#fff') new_engine.enable_alpha() new_engine.paste(self.engine, (offset_x, offset_y)) self.engine.image = new_engine.image
python
def handle_padding(self, padding): '''Pads the image with transparent pixels if necessary.''' left = padding[0] top = padding[1] right = padding[2] bottom = padding[3] offset_x = 0 offset_y = 0 new_width = self.engine.size[0] new_height = self.engine.size[1] if left > 0: offset_x = left new_width += left if top > 0: offset_y = top new_height += top if right > 0: new_width += right if bottom > 0: new_height += bottom new_engine = self.context.modules.engine.__class__(self.context) new_engine.image = new_engine.gen_image((new_width, new_height), '#fff') new_engine.enable_alpha() new_engine.paste(self.engine, (offset_x, offset_y)) self.engine.image = new_engine.image
['def', 'handle_padding', '(', 'self', ',', 'padding', ')', ':', 'left', '=', 'padding', '[', '0', ']', 'top', '=', 'padding', '[', '1', ']', 'right', '=', 'padding', '[', '2', ']', 'bottom', '=', 'padding', '[', '3', ']', 'offset_x', '=', '0', 'offset_y', '=', '0', 'new_width', '=', 'self', '.', 'engine', '.', 'size', '[', '0', ']', 'new_height', '=', 'self', '.', 'engine', '.', 'size', '[', '1', ']', 'if', 'left', '>', '0', ':', 'offset_x', '=', 'left', 'new_width', '+=', 'left', 'if', 'top', '>', '0', ':', 'offset_y', '=', 'top', 'new_height', '+=', 'top', 'if', 'right', '>', '0', ':', 'new_width', '+=', 'right', 'if', 'bottom', '>', '0', ':', 'new_height', '+=', 'bottom', 'new_engine', '=', 'self', '.', 'context', '.', 'modules', '.', 'engine', '.', '__class__', '(', 'self', '.', 'context', ')', 'new_engine', '.', 'image', '=', 'new_engine', '.', 'gen_image', '(', '(', 'new_width', ',', 'new_height', ')', ',', "'#fff'", ')', 'new_engine', '.', 'enable_alpha', '(', ')', 'new_engine', '.', 'paste', '(', 'self', '.', 'engine', ',', '(', 'offset_x', ',', 'offset_y', ')', ')', 'self', '.', 'engine', '.', 'image', '=', 'new_engine', '.', 'image']
Pads the image with transparent pixels if necessary.
['Pads', 'the', 'image', 'with', 'transparent', 'pixels', 'if', 'necessary', '.']
train
https://github.com/thumbor/thumbor/blob/558ccdd6e3bc29e1c9ee3687372c4b3eb05ac607/thumbor/filters/frame.py#L50-L76
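The canvas arithmetic in isolation: a (left, top, right, bottom) padding tuple grows the new engine's canvas and shifts the paste offset of the original image.

padding = (10, 0, 10, 20)                # left, top, right, bottom
width, height = 300, 200                 # original engine size
new_size = (width + padding[0] + padding[2], height + padding[1] + padding[3])
offset = (padding[0], padding[1])        # where the original image is pasted
print(new_size, offset)                  # (320, 220) (10, 0)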
1,075
lipoja/URLExtract
urlextract/cachefile.py
CacheFile._load_cached_tlds
def _load_cached_tlds(self): """ Loads TLDs from cached file to set. :return: Set of current TLDs :rtype: set """ # check if cached file is readable if not os.access(self._tld_list_path, os.R_OK): self._logger.error("Cached file is not readable for current " "user. ({})".format(self._tld_list_path)) raise CacheFileError( "Cached file is not readable for current user." ) set_of_tlds = set() with open(self._tld_list_path, 'r') as f_cache_tld: for line in f_cache_tld: tld = line.strip().lower() # skip empty lines if not tld: continue # skip comments if tld[0] == '#': continue set_of_tlds.add("." + tld) set_of_tlds.add("." + idna.decode(tld)) return set_of_tlds
python
def _load_cached_tlds(self): """ Loads TLDs from cached file to set. :return: Set of current TLDs :rtype: set """ # check if cached file is readable if not os.access(self._tld_list_path, os.R_OK): self._logger.error("Cached file is not readable for current " "user. ({})".format(self._tld_list_path)) raise CacheFileError( "Cached file is not readable for current user." ) set_of_tlds = set() with open(self._tld_list_path, 'r') as f_cache_tld: for line in f_cache_tld: tld = line.strip().lower() # skip empty lines if not tld: continue # skip comments if tld[0] == '#': continue set_of_tlds.add("." + tld) set_of_tlds.add("." + idna.decode(tld)) return set_of_tlds
['def', '_load_cached_tlds', '(', 'self', ')', ':', '# check if cached file is readable', 'if', 'not', 'os', '.', 'access', '(', 'self', '.', '_tld_list_path', ',', 'os', '.', 'R_OK', ')', ':', 'self', '.', '_logger', '.', 'error', '(', '"Cached file is not readable for current "', '"user. ({})"', '.', 'format', '(', 'self', '.', '_tld_list_path', ')', ')', 'raise', 'CacheFileError', '(', '"Cached file is not readable for current user."', ')', 'set_of_tlds', '=', 'set', '(', ')', 'with', 'open', '(', 'self', '.', '_tld_list_path', ',', "'r'", ')', 'as', 'f_cache_tld', ':', 'for', 'line', 'in', 'f_cache_tld', ':', 'tld', '=', 'line', '.', 'strip', '(', ')', '.', 'lower', '(', ')', '# skip empty lines', 'if', 'not', 'tld', ':', 'continue', '# skip comments', 'if', 'tld', '[', '0', ']', '==', "'#'", ':', 'continue', 'set_of_tlds', '.', 'add', '(', '"."', '+', 'tld', ')', 'set_of_tlds', '.', 'add', '(', '"."', '+', 'idna', '.', 'decode', '(', 'tld', ')', ')', 'return', 'set_of_tlds']
Loads TLDs from cached file to set. :return: Set of current TLDs :rtype: set
['Loads', 'TLDs', 'from', 'cached', 'file', 'to', 'set', '.']
train
https://github.com/lipoja/URLExtract/blob/b53fd2adfaed3cd23a811aed4d277b0ade7b4640/urlextract/cachefile.py#L190-L220
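The per-line filtering reproduced standalone (requires the idna package); idna.decode turns punycoded TLDs such as xn--p1ai into their Unicode form.

import idna

cache_lines = ["# comment line", "", "COM", "xn--p1ai"]  # sample cache-file lines
tlds = set()
for line in cache_lines:
    tld = line.strip().lower()
    if not tld or tld[0] == '#':     # skip empty lines and comments
        continue
    tlds.add("." + tld)
    tlds.add("." + idna.decode(tld))
print(sorted(tlds))                  # ['.com', '.xn--p1ai', '.рф']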
1,076
flatangle/flatlib
flatlib/predictives/primarydirections.py
PDTable.bySignificator
def bySignificator(self, ID): """ Returns all directions to a significator. """ res = [] for direction in self.table: if ID in direction[2]: res.append(direction) return res
python
def bySignificator(self, ID): """ Returns all directions to a significator. """ res = [] for direction in self.table: if ID in direction[2]: res.append(direction) return res
['def', 'bySignificator', '(', 'self', ',', 'ID', ')', ':', 'res', '=', '[', ']', 'for', 'direction', 'in', 'self', '.', 'table', ':', 'if', 'ID', 'in', 'direction', '[', '2', ']', ':', 'res', '.', 'append', '(', 'direction', ')', 'return', 'res']
Returns all directions to a significator.
['Returns', 'all', 'directions', 'to', 'a', 'significator', '.']
train
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/predictives/primarydirections.py#L322-L328
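Usage sketch, assuming `pd_table` is a PDTable already computed for a chart; const.MARS is one of flatlib's object IDs.

from flatlib import const

mars_hits = pd_table.bySignificator(const.MARS)   # directions whose significator is Mars
for direction in mars_hits:
    print(direction)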
1,077
WZBSocialScienceCenter/tmtoolkit
tmtoolkit/topicmod/visualize.py
plot_eval_results
def plot_eval_results(eval_results, metric=None, xaxislabel=None, yaxislabel=None, title=None, title_fontsize='x-large', axes_title_fontsize='large', show_metric_direction=True, metric_direction_font_size='large', subplots_opts=None, subplots_adjust_opts=None, figsize='auto', **fig_kwargs): """ Plot the evaluation results from `eval_results`. `eval_results` must be a sequence containing `(param, values)` tuples, where `param` is the parameter value to appear on the x axis and `values` can be a dict structure containing the metric values. `eval_results` can be created using the `results_by_parameter` function from the `topicmod.common` module. Set `metric` to plot only a specific metric. Set `xaxislabel` for a label on the x-axis. Set `yaxislabel` for a label on the y-axis. Set `title` for a plot title. Options in a dict `subplots_opts` will be passed to `plt.subplots(...)`. Options in a dict `subplots_adjust_opts` will be passed to `fig.subplots_adjust(...)`. `figsize` can be set to a tuple `(width, height)` or to `"auto"` (default) which will set the size to `(8, 2 * <num. of metrics>)`. """ if type(eval_results) not in (list, tuple) or not eval_results: raise ValueError('`eval_results` must be a list or tuple with at least one element') if type(eval_results[0]) not in (list, tuple) or len(eval_results[0]) != 2: raise ValueError('`eval_results` must be a list or tuple containing a (param, values) tuple. ' 'Maybe `eval_results` must be converted with `results_by_parameter`.') if metric is not None and type(metric) not in (list, tuple): metric = [metric] elif metric is None: # remove special evaluation result 'model': the calculated model itself metric = list(set(next(iter(eval_results))[1].keys()) - {'model'}) metric = sorted(metric) metric_direction = [] for m in metric: if m == 'perplexity': metric_direction.append('minimize') else: m_fn_name = 'metric_%s' % (m[:16] if m.startswith('coherence_gensim') else m) m_fn = getattr(evaluate, m_fn_name, None) if m_fn: metric_direction.append(getattr(m_fn, 'direction', 'unknown')) else: metric_direction.append('unknown') n_metrics = len(metric) assert n_metrics == len(metric_direction) metrics_ordered = [] for m_dir in sorted(set(metric_direction), reverse=True): metrics_ordered.extend([(m, d) for m, d in zip(metric, metric_direction) if d == m_dir]) assert n_metrics == len(metrics_ordered) # get figure and subplots (axes) if figsize == 'auto': figsize = (8, 2*n_metrics) subplots_kwargs = dict(nrows=n_metrics, ncols=1, sharex=True, constrained_layout=True, figsize=figsize) subplots_kwargs.update(subplots_opts or {}) subplots_kwargs.update(fig_kwargs) fig, axes = plt.subplots(**subplots_kwargs) # set title if title: fig.suptitle(title, fontsize=title_fontsize) x = list(zip(*eval_results))[0] # set adjustments if title: subplots_adjust_kwargs = dict(top=0.9, hspace=0.3) else: subplots_adjust_kwargs = {} subplots_adjust_kwargs.update(subplots_adjust_opts or {}) if subplots_adjust_kwargs: fig.subplots_adjust(**subplots_adjust_kwargs) # draw subplot for each metric axes_pos_per_dir = defaultdict(list) for i, (ax, (m, m_dir)) in enumerate(zip(axes.flatten(), metrics_ordered)): if show_metric_direction: axes_pos_per_dir[m_dir].append(ax.get_position()) y = [metric_res[m] for _, metric_res in eval_results] ax.plot(x, y, label=m) ax.set_title(m, fontsize=axes_title_fontsize) # set axis labels if xaxislabel and i == len(metric)-1: ax.set_xlabel(xaxislabel) if yaxislabel: ax.set_ylabel(yaxislabel) # show grouped metric direction on the left if axes_pos_per_dir: # = if show_metric_direction left_xs = [] ys = [] for m_dir, bboxes in axes_pos_per_dir.items(): left_xs.append(min(bb.x0 for bb in bboxes)) min_y = min(bb.y0 for bb in bboxes) max_y = max(bb.y1 for bb in bboxes) ys.append((min_y, max_y)) left_x = min(left_xs) / 2.5 fig.lines = [] for (min_y, max_y), m_dir in zip(ys, axes_pos_per_dir.keys()): center_y = min_y + (max_y - min_y) / 2 fig.lines.append(Line2D((left_x, left_x), (min_y, max_y), transform=fig.transFigure, linewidth=5, color='lightgray')) fig.text(left_x / 1.5, center_y, m_dir, fontsize=metric_direction_font_size, rotation='vertical', horizontalalignment='right', verticalalignment='center') return fig, axes
python
def plot_eval_results(eval_results, metric=None, xaxislabel=None, yaxislabel=None, title=None, title_fontsize='x-large', axes_title_fontsize='large', show_metric_direction=True, metric_direction_font_size='large', subplots_opts=None, subplots_adjust_opts=None, figsize='auto', **fig_kwargs): """ Plot the evaluation results from `eval_results`. `eval_results` must be a sequence containing `(param, values)` tuples, where `param` is the parameter value to appear on the x axis and `values` can be a dict structure containing the metric values. `eval_results` can be created using the `results_by_parameter` function from the `topicmod.common` module. Set `metric` to plot only a specific metric. Set `xaxislabel` for a label on the x-axis. Set `yaxislabel` for a label on the y-axis. Set `title` for a plot title. Options in a dict `subplots_opts` will be passed to `plt.subplots(...)`. Options in a dict `subplots_adjust_opts` will be passed to `fig.subplots_adjust(...)`. `figsize` can be set to a tuple `(width, height)` or to `"auto"` (default) which will set the size to `(8, 2 * <num. of metrics>)`. """ if type(eval_results) not in (list, tuple) or not eval_results: raise ValueError('`eval_results` must be a list or tuple with at least one element') if type(eval_results[0]) not in (list, tuple) or len(eval_results[0]) != 2: raise ValueError('`eval_results` must be a list or tuple containing a (param, values) tuple. ' 'Maybe `eval_results` must be converted with `results_by_parameter`.') if metric is not None and type(metric) not in (list, tuple): metric = [metric] elif metric is None: # remove special evaluation result 'model': the calculated model itself metric = list(set(next(iter(eval_results))[1].keys()) - {'model'}) metric = sorted(metric) metric_direction = [] for m in metric: if m == 'perplexity': metric_direction.append('minimize') else: m_fn_name = 'metric_%s' % (m[:16] if m.startswith('coherence_gensim') else m) m_fn = getattr(evaluate, m_fn_name, None) if m_fn: metric_direction.append(getattr(m_fn, 'direction', 'unknown')) else: metric_direction.append('unknown') n_metrics = len(metric) assert n_metrics == len(metric_direction) metrics_ordered = [] for m_dir in sorted(set(metric_direction), reverse=True): metrics_ordered.extend([(m, d) for m, d in zip(metric, metric_direction) if d == m_dir]) assert n_metrics == len(metrics_ordered) # get figure and subplots (axes) if figsize == 'auto': figsize = (8, 2*n_metrics) subplots_kwargs = dict(nrows=n_metrics, ncols=1, sharex=True, constrained_layout=True, figsize=figsize) subplots_kwargs.update(subplots_opts or {}) subplots_kwargs.update(fig_kwargs) fig, axes = plt.subplots(**subplots_kwargs) # set title if title: fig.suptitle(title, fontsize=title_fontsize) x = list(zip(*eval_results))[0] # set adjustments if title: subplots_adjust_kwargs = dict(top=0.9, hspace=0.3) else: subplots_adjust_kwargs = {} subplots_adjust_kwargs.update(subplots_adjust_opts or {}) if subplots_adjust_kwargs: fig.subplots_adjust(**subplots_adjust_kwargs) # draw subplot for each metric axes_pos_per_dir = defaultdict(list) for i, (ax, (m, m_dir)) in enumerate(zip(axes.flatten(), metrics_ordered)): if show_metric_direction: axes_pos_per_dir[m_dir].append(ax.get_position()) y = [metric_res[m] for _, metric_res in eval_results] ax.plot(x, y, label=m) ax.set_title(m, fontsize=axes_title_fontsize) # set axis labels if xaxislabel and i == len(metric)-1: ax.set_xlabel(xaxislabel) if yaxislabel: ax.set_ylabel(yaxislabel) # show grouped metric direction on the left if axes_pos_per_dir: # = if show_metric_direction left_xs = [] ys = [] for m_dir, bboxes in axes_pos_per_dir.items(): left_xs.append(min(bb.x0 for bb in bboxes)) min_y = min(bb.y0 for bb in bboxes) max_y = max(bb.y1 for bb in bboxes) ys.append((min_y, max_y)) left_x = min(left_xs) / 2.5 fig.lines = [] for (min_y, max_y), m_dir in zip(ys, axes_pos_per_dir.keys()): center_y = min_y + (max_y - min_y) / 2 fig.lines.append(Line2D((left_x, left_x), (min_y, max_y), transform=fig.transFigure, linewidth=5, color='lightgray')) fig.text(left_x / 1.5, center_y, m_dir, fontsize=metric_direction_font_size, rotation='vertical', horizontalalignment='right', verticalalignment='center') return fig, axes
['def', 'plot_eval_results', '(', 'eval_results', ',', 'metric', '=', 'None', ',', 'xaxislabel', '=', 'None', ',', 'yaxislabel', '=', 'None', ',', 'title', '=', 'None', ',', 'title_fontsize', '=', "'x-large'", ',', 'axes_title_fontsize', '=', "'large'", ',', 'show_metric_direction', '=', 'True', ',', 'metric_direction_font_size', '=', "'large'", ',', 'subplots_opts', '=', 'None', ',', 'subplots_adjust_opts', '=', 'None', ',', 'figsize', '=', "'auto'", ',', '*', '*', 'fig_kwargs', ')', ':', 'if', 'type', '(', 'eval_results', ')', 'not', 'in', '(', 'list', ',', 'tuple', ')', 'or', 'not', 'eval_results', ':', 'raise', 'ValueError', '(', "'`eval_results` must be a list or tuple with at least one element'", ')', 'if', 'type', '(', 'eval_results', '[', '0', ']', ')', 'not', 'in', '(', 'list', ',', 'tuple', ')', 'or', 'len', '(', 'eval_results', '[', '0', ']', ')', '!=', '2', ':', 'raise', 'ValueError', '(', "'`eval_results` must be a list or tuple containing a (param, values) tuple. '", "'Maybe `eval_results` must be converted with `results_by_parameter`.'", ')', 'if', 'metric', 'is', 'not', 'None', 'and', 'type', '(', 'metric', ')', 'not', 'in', '(', 'list', ',', 'tuple', ')', ':', 'metric', '=', '[', 'metric', ']', 'elif', 'metric', 'is', 'None', ':', "# remove special evaluation result 'model': the calculated model itself", 'metric', '=', 'list', '(', 'set', '(', 'next', '(', 'iter', '(', 'eval_results', ')', ')', '[', '1', ']', '.', 'keys', '(', ')', ')', '-', '{', "'model'", '}', ')', 'metric', '=', 'sorted', '(', 'metric', ')', 'metric_direction', '=', '[', ']', 'for', 'm', 'in', 'metric', ':', 'if', 'm', '==', "'perplexity'", ':', 'metric_direction', '.', 'append', '(', "'minimize'", ')', 'else', ':', 'm_fn_name', '=', "'metric_%s'", '%', '(', 'm', '[', ':', '16', ']', 'if', 'm', '.', 'startswith', '(', "'coherence_gensim'", ')', 'else', 'm', ')', 'm_fn', '=', 'getattr', '(', 'evaluate', ',', 'm_fn_name', ',', 'None', ')', 'if', 'm_fn', ':', 'metric_direction', '.', 'append', '(', 'getattr', '(', 'm_fn', ',', "'direction'", ',', "'unknown'", ')', ')', 'else', ':', 'metric_direction', '.', 'append', '(', "'unknown'", ')', 'n_metrics', '=', 'len', '(', 'metric', ')', 'assert', 'n_metrics', '==', 'len', '(', 'metric_direction', ')', 'metrics_ordered', '=', '[', ']', 'for', 'm_dir', 'in', 'sorted', '(', 'set', '(', 'metric_direction', ')', ',', 'reverse', '=', 'True', ')', ':', 'metrics_ordered', '.', 'extend', '(', '[', '(', 'm', ',', 'd', ')', 'for', 'm', ',', 'd', 'in', 'zip', '(', 'metric', ',', 'metric_direction', ')', 'if', 'd', '==', 'm_dir', ']', ')', 'assert', 'n_metrics', '==', 'len', '(', 'metrics_ordered', ')', '# get figure and subplots (axes)', 'if', 'figsize', '==', "'auto'", ':', 'figsize', '=', '(', '8', ',', '2', '*', 'n_metrics', ')', 'subplots_kwargs', '=', 'dict', '(', 'nrows', '=', 'n_metrics', ',', 'ncols', '=', '1', ',', 'sharex', '=', 'True', ',', 'constrained_layout', '=', 'True', ',', 'figsize', '=', 'figsize', ')', 'subplots_kwargs', '.', 'update', '(', 'subplots_opts', 'or', '{', '}', ')', 'subplots_kwargs', '.', 'update', '(', 'fig_kwargs', ')', 'fig', ',', 'axes', '=', 'plt', '.', 'subplots', '(', '*', '*', 'subplots_kwargs', ')', '# set title', 'if', 'title', ':', 'fig', '.', 'suptitle', '(', 'title', ',', 'fontsize', '=', 'title_fontsize', ')', 'x', '=', 'list', '(', 'zip', '(', '*', 'eval_results', ')', ')', '[', '0', ']', '# set adjustments', 'if', 'title', ':', 'subplots_adjust_kwargs', '=', 'dict', '(', 'top', '=', '0.9', ',', 'hspace', '=', '0.3', ')', 
'else', ':', 'subplots_adjust_kwargs', '=', '{', '}', 'subplots_adjust_kwargs', '.', 'update', '(', 'subplots_adjust_opts', 'or', '{', '}', ')', 'if', 'subplots_adjust_kwargs', ':', 'fig', '.', 'subplots_adjust', '(', '*', '*', 'subplots_adjust_kwargs', ')', '# draw subplot for each metric', 'axes_pos_per_dir', '=', 'defaultdict', '(', 'list', ')', 'for', 'i', ',', '(', 'ax', ',', '(', 'm', ',', 'm_dir', ')', ')', 'in', 'enumerate', '(', 'zip', '(', 'axes', '.', 'flatten', '(', ')', ',', 'metrics_ordered', ')', ')', ':', 'if', 'show_metric_direction', ':', 'axes_pos_per_dir', '[', 'm_dir', ']', '.', 'append', '(', 'ax', '.', 'get_position', '(', ')', ')', 'y', '=', '[', 'metric_res', '[', 'm', ']', 'for', '_', ',', 'metric_res', 'in', 'eval_results', ']', 'ax', '.', 'plot', '(', 'x', ',', 'y', ',', 'label', '=', 'm', ')', 'ax', '.', 'set_title', '(', 'm', ',', 'fontsize', '=', 'axes_title_fontsize', ')', '# set axis labels', 'if', 'xaxislabel', 'and', 'i', '==', 'len', '(', 'metric', ')', '-', '1', ':', 'ax', '.', 'set_xlabel', '(', 'xaxislabel', ')', 'if', 'yaxislabel', ':', 'ax', '.', 'set_ylabel', '(', 'yaxislabel', ')', '# show grouped metric direction on the left', 'if', 'axes_pos_per_dir', ':', '# = if show_metric_direction', 'left_xs', '=', '[', ']', 'ys', '=', '[', ']', 'for', 'm_dir', ',', 'bboxes', 'in', 'axes_pos_per_dir', '.', 'items', '(', ')', ':', 'left_xs', '.', 'append', '(', 'min', '(', 'bb', '.', 'x0', 'for', 'bb', 'in', 'bboxes', ')', ')', 'min_y', '=', 'min', '(', 'bb', '.', 'y0', 'for', 'bb', 'in', 'bboxes', ')', 'max_y', '=', 'max', '(', 'bb', '.', 'y1', 'for', 'bb', 'in', 'bboxes', ')', 'ys', '.', 'append', '(', '(', 'min_y', ',', 'max_y', ')', ')', 'left_x', '=', 'min', '(', 'left_xs', ')', '/', '2.5', 'fig', '.', 'lines', '=', '[', ']', 'for', '(', 'min_y', ',', 'max_y', ')', ',', 'm_dir', 'in', 'zip', '(', 'ys', ',', 'axes_pos_per_dir', '.', 'keys', '(', ')', ')', ':', 'center_y', '=', 'min_y', '+', '(', 'max_y', '-', 'min_y', ')', '/', '2', 'fig', '.', 'lines', '.', 'append', '(', 'Line2D', '(', '(', 'left_x', ',', 'left_x', ')', ',', '(', 'min_y', ',', 'max_y', ')', ',', 'transform', '=', 'fig', '.', 'transFigure', ',', 'linewidth', '=', '5', ',', 'color', '=', "'lightgray'", ')', ')', 'fig', '.', 'text', '(', 'left_x', '/', '1.5', ',', 'center_y', ',', 'm_dir', ',', 'fontsize', '=', 'metric_direction_font_size', ',', 'rotation', '=', "'vertical'", ',', 'horizontalalignment', '=', "'right'", ',', 'verticalalignment', '=', "'center'", ')', 'return', 'fig', ',', 'axes']
Plot the evaluation results from `eval_results`. `eval_results` must be a sequence containing `(param, values)`
tuples, where `param` is the parameter value to appear on the x axis and `values` can be a dict structure
containing the metric values. `eval_results` can be created using the `results_by_parameter` function from the
`topicmod.common` module.
Set `metric` to plot only a specific metric.
Set `xaxislabel` for a label on the x-axis.
Set `yaxislabel` for a label on the y-axis.
Set `title` for a plot title.
Options in a dict `subplots_opts` will be passed to `plt.subplots(...)`.
Options in a dict `subplots_adjust_opts` will be passed to `fig.subplots_adjust(...)`.
`figsize` can be set to a tuple `(width, height)` or to `"auto"` (default) which will set the size to
`(8, 2 * <num. of metrics>)`.
['Plot', 'the', 'evaluation', 'results', 'from', 'eval_results', '.', 'eval_results', 'must', 'be', 'a', 'sequence', 'containing', '(', 'param', 'values', ')', 'tuples', 'where', 'param', 'is', 'the', 'parameter', 'value', 'to', 'appear', 'on', 'the', 'x', 'axis', 'and', 'values', 'can', 'be', 'a', 'dict', 'structure', 'containing', 'the', 'metric', 'values', '.', 'eval_results', 'can', 'be', 'created', 'using', 'the', 'results_by_parameter', 'function', 'from', 'the', 'topicmod', '.', 'common', 'module', '.', 'Set', 'metric', 'to', 'plot', 'only', 'a', 'specific', 'metric', '.', 'Set', 'xaxislabel', 'for', 'a', 'label', 'on', 'the', 'x', '-', 'axis', '.', 'Set', 'yaxislabel', 'for', 'a', 'label', 'on', 'the', 'y', '-', 'axis', '.', 'Set', 'title', 'for', 'a', 'plot', 'title', '.', 'Options', 'in', 'a', 'dict', 'subplots_opts', 'will', 'be', 'passed', 'to', 'plt', '.', 'subplots', '(', '...', ')', '.', 'Options', 'in', 'a', 'dict', 'subplots_adjust_opts', 'will', 'be', 'passed', 'to', 'fig', '.', 'subplots_adjust', '(', '...', ')', '.', 'figsize', 'can', 'be', 'set', 'to', 'a', 'tuple', '(', 'width', 'height', ')', 'or', 'to', 'auto', '(', 'default', ')', 'which', 'will', 'set', 'the', 'size', 'to', '(', '8', '2', '*', '<num', '.', 'of', 'metrics', '>', ')', '.']
train
https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/visualize.py#L312-L434
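A minimal usage sketch for the record above, assuming the import path shown in the URL; the `eval_results` pairs and metric names here are hand-built illustrations, whereas real input would come from `results_by_parameter`.

# hedged usage sketch; import path and metric names are assumptions
import matplotlib.pyplot as plt
from tmtoolkit.topicmod.visualize import plot_eval_results

# synthetic (param, metric values) pairs in the documented format
eval_results = [
    (10, {'perplexity': 1250.0, 'coherence_mimno_2011': -2.1}),
    (20, {'perplexity': 1100.0, 'coherence_mimno_2011': -1.8}),
    (30, {'perplexity': 1080.0, 'coherence_mimno_2011': -1.9}),
]

fig, axes = plot_eval_results(eval_results,
                              xaxislabel='number of topics',
                              title='LDA evaluation results')
plt.show()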
1,078
spyder-ide/spyder
spyder/widgets/comboboxes.py
PathComboBox.add_tooltip_to_highlighted_item
def add_tooltip_to_highlighted_item(self, index):
    """
    Add a tooltip showing the full path of the currently highlighted item
    of the PathComboBox.
    """
    self.setItemData(index, self.itemText(index), Qt.ToolTipRole)
python
def add_tooltip_to_highlighted_item(self, index):
    """
    Add a tooltip showing the full path of the currently highlighted item
    of the PathComboBox.
    """
    self.setItemData(index, self.itemText(index), Qt.ToolTipRole)
['def', 'add_tooltip_to_highlighted_item', '(', 'self', ',', 'index', ')', ':', 'self', '.', 'setItemData', '(', 'index', ',', 'self', '.', 'itemText', '(', 'index', ')', ',', 'Qt', '.', 'ToolTipRole', ')']
Add a tooltip showing the full path of the currently highlighted item of the PathComboBox.
['Add', 'a', 'tooltip', 'showing', 'the', 'full', 'path', 'of', 'the', 'currently', 'highlighted', 'item', 'of', 'the', 'PathComboBox', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/comboboxes.py#L287-L292
1,079
kislyuk/aegea
aegea/packages/github3/api.py
iter_following
def iter_following(username, number=-1, etag=None):
    """List the people ``username`` follows.

    :param str username: (required), login of the user
    :param int number: (optional), number of users being followed by username
        to return. Default: -1, return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    return gh.iter_following(username, number, etag) if username else []
python
def iter_following(username, number=-1, etag=None):
    """List the people ``username`` follows.

    :param str username: (required), login of the user
    :param int number: (optional), number of users being followed by username
        to return. Default: -1, return all of them
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`User <github3.users.User>`
    """
    return gh.iter_following(username, number, etag) if username else []
['def', 'iter_following', '(', 'username', ',', 'number', '=', '-', '1', ',', 'etag', '=', 'None', ')', ':', 'return', 'gh', '.', 'iter_following', '(', 'username', ',', 'number', ',', 'etag', ')', 'if', 'username', 'else', '[', ']']
List the people ``username`` follows.

:param str username: (required), login of the user
:param int number: (optional), number of users being followed by username
    to return. Default: -1, return all of them
:param str etag: (optional), ETag from a previous request to the same
    endpoint
:returns: generator of :class:`User <github3.users.User>`
['List', 'the', 'people', 'username', 'follows', '.']
train
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/api.py#L152-L163
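For context, a hedged call sketch: iterating the anonymous endpoint and printing logins (the `login` attribute follows the github3 `User` model referenced in the docstring; the username is illustrative).

# hedged usage sketch for the module-level helper above
for user in iter_following('octocat', number=5):
    print(user.login)  # `login` per the github3 User model

# a falsy username short-circuits to an empty list
assert iter_following('') == []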
1,080
xray7224/PyPump
pypump/models/__init__.py
Addressable._set_people
def _set_people(self, people):
    """ Sets who the object is sent to """
    if hasattr(people, "object_type"):
        people = [people]
    elif hasattr(people, "__iter__"):
        people = list(people)

    return people
python
def _set_people(self, people):
    """ Sets who the object is sent to """
    if hasattr(people, "object_type"):
        people = [people]
    elif hasattr(people, "__iter__"):
        people = list(people)

    return people
['def', '_set_people', '(', 'self', ',', 'people', ')', ':', 'if', 'hasattr', '(', 'people', ',', '"object_type"', ')', ':', 'people', '=', '[', 'people', ']', 'elif', 'hasattr', '(', 'people', ',', '"__iter__"', ')', ':', 'people', '=', 'list', '(', 'people', ')', 'return', 'people']
Sets who the object is sent to
['Sets', 'who', 'the', 'object', 'is', 'sent', 'to']
train
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/__init__.py#L493-L500
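A behaviour sketch of the normalisation above, using a standalone copy of the helper and a dummy class so it runs without PyPump; the `Person` class merely mimics a pump.io model carrying `object_type`.

# standalone copy of the helper for illustration only
def _set_people(people):
    if hasattr(people, "object_type"):
        people = [people]
    elif hasattr(people, "__iter__"):
        people = list(people)
    return people

class Person:
    object_type = 'person'  # mimics a PyPump model attribute

alice = Person()
assert _set_people(alice) == [alice]          # single object -> list
assert _set_people(iter([alice])) == [alice]  # any iterable -> list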
1,081
fboender/ansible-cmdb
src/ansiblecmdb/ansible.py
Ansible.get_hosts
def get_hosts(self):
    """
    Return a list of parsed hosts info, with the limit applied if required.
    """
    limited_hosts = {}
    if self.limit is not None:
        # Find hosts and groups of hosts to include
        for include in self.limit['include']:
            # Include whole group
            for hostname in self.hosts_in_group(include):
                limited_hosts[hostname] = self.hosts[hostname]
            # Include individual host
            if include in self.hosts:
                limited_hosts[include] = self.hosts[include]

        # Find hosts and groups of hosts to exclude
        for exclude in self.limit["exclude"]:
            # Exclude whole group
            for hostname in self.hosts_in_group(exclude):
                if hostname in limited_hosts:
                    limited_hosts.pop(hostname)
            # Exclude individual host
            if exclude in limited_hosts:
                limited_hosts.pop(exclude)
        return limited_hosts
    else:
        # Return all hosts
        return self.hosts
python
def get_hosts(self):
    """
    Return a list of parsed hosts info, with the limit applied if required.
    """
    limited_hosts = {}
    if self.limit is not None:
        # Find hosts and groups of hosts to include
        for include in self.limit['include']:
            # Include whole group
            for hostname in self.hosts_in_group(include):
                limited_hosts[hostname] = self.hosts[hostname]
            # Include individual host
            if include in self.hosts:
                limited_hosts[include] = self.hosts[include]

        # Find hosts and groups of hosts to exclude
        for exclude in self.limit["exclude"]:
            # Exclude whole group
            for hostname in self.hosts_in_group(exclude):
                if hostname in limited_hosts:
                    limited_hosts.pop(hostname)
            # Exclude individual host
            if exclude in limited_hosts:
                limited_hosts.pop(exclude)
        return limited_hosts
    else:
        # Return all hosts
        return self.hosts
['def', 'get_hosts', '(', 'self', ')', ':', 'limited_hosts', '=', '{', '}', 'if', 'self', '.', 'limit', 'is', 'not', 'None', ':', '# Find hosts and groups of hosts to include', 'for', 'include', 'in', 'self', '.', 'limit', '[', "'include'", ']', ':', '# Include whole group', 'for', 'hostname', 'in', 'self', '.', 'hosts_in_group', '(', 'include', ')', ':', 'limited_hosts', '[', 'hostname', ']', '=', 'self', '.', 'hosts', '[', 'hostname', ']', '# Include individual host', 'if', 'include', 'in', 'self', '.', 'hosts', ':', 'limited_hosts', '[', 'include', ']', '=', 'self', '.', 'hosts', '[', 'include', ']', '# Find hosts and groups of hosts to exclude', 'for', 'exclude', 'in', 'self', '.', 'limit', '[', '"exclude"', ']', ':', '# Exclude whole group', 'for', 'hostname', 'in', 'self', '.', 'hosts_in_group', '(', 'exclude', ')', ':', 'if', 'hostname', 'in', 'limited_hosts', ':', 'limited_hosts', '.', 'pop', '(', 'hostname', ')', '# Exclude individual host', 'if', 'exclude', 'in', 'limited_hosts', ':', 'limited_hosts', '.', 'pop', '(', 'exclude', ')', 'return', 'limited_hosts', 'else', ':', '# Return all hosts', 'return', 'self', '.', 'hosts']
Return a list of parsed hosts info, with the limit applied if required.
['Return', 'a', 'list', 'of', 'parsed', 'hosts', 'info', 'with', 'the', 'limit', 'applied', 'if', 'required', '.']
train
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/src/ansiblecmdb/ansible.py#L363-L390
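A hedged sketch of the `limit` structure the method consumes: inclusions (groups or single hosts) are gathered first, then exclusions are carved out. The `ansible` object, group names and hostnames are illustrative; note the return value is actually a dict of hostname to parsed info.

# illustrative limit structure; names are placeholders
ansible.limit = {
    'include': ['webservers', 'db01.example.com'],  # groups and single hosts
    'exclude': ['staging'],                         # removed afterwards
}
hosts = ansible.get_hosts()  # dict mapping hostname -> parsed host info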
1,082
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/discovery.py
Discovery.do_response
def do_response(self, response_args=None, request=None, **kwargs):
    """
    **Placeholder for the time being**

    :param response_args:
    :param request:
    :param kwargs: request arguments
    :return: Response information
    """
    links = [Link(href=h, rel=OIC_ISSUER) for h in kwargs['hrefs']]

    _response = JRD(subject=kwargs['subject'], links=links)

    info = {
        'response': _response.to_json(),
        'http_headers': [('Content-type', 'application/json')]
    }

    return info
python
def do_response(self, response_args=None, request=None, **kwargs):
    """
    **Placeholder for the time being**

    :param response_args:
    :param request:
    :param kwargs: request arguments
    :return: Response information
    """
    links = [Link(href=h, rel=OIC_ISSUER) for h in kwargs['hrefs']]

    _response = JRD(subject=kwargs['subject'], links=links)

    info = {
        'response': _response.to_json(),
        'http_headers': [('Content-type', 'application/json')]
    }

    return info
['def', 'do_response', '(', 'self', ',', 'response_args', '=', 'None', ',', 'request', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'links', '=', '[', 'Link', '(', 'href', '=', 'h', ',', 'rel', '=', 'OIC_ISSUER', ')', 'for', 'h', 'in', 'kwargs', '[', "'hrefs'", ']', ']', '_response', '=', 'JRD', '(', 'subject', '=', 'kwargs', '[', "'subject'", ']', ',', 'links', '=', 'links', ')', 'info', '=', '{', "'response'", ':', '_response', '.', 'to_json', '(', ')', ',', "'http_headers'", ':', '[', '(', "'Content-type'", ',', "'application/json'", ')', ']', '}', 'return', 'info']
**Placeholder for the time being**

:param response_args:
:param request:
:param kwargs: request arguments
:return: Response information
['**', 'Placeholder', 'for', 'the', 'time', 'being', '**']
train
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/discovery.py#L17-L36
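To make the JRD shape concrete, this is roughly what `_response.to_json()` serialises to for a single issuer; subject and href values are illustrative, and the rel URI is the standard OpenID Connect issuer relation that `OIC_ISSUER` stands for.

# approximate serialised WebFinger (JRD) payload; values are illustrative
jrd_example = {
    "subject": "acct:alice@example.com",
    "links": [
        {
            "rel": "http://openid.net/specs/connect/1.0/issuer",
            "href": "https://op.example.com",
        }
    ],
}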
1,083
OCHA-DAP/hdx-python-api
src/hdx/data/resource_view.py
ResourceView._update_resource_view
def _update_resource_view(self, log=False):
    # type: () -> bool
    """Check if resource view exists in HDX and if so, update resource view

    Returns:
        bool: True if updated and False if not
    """
    update = False
    if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']):
        update = True
    else:
        if 'resource_id' in self.data:
            resource_views = self.get_all_for_resource(self.data['resource_id'])
            for resource_view in resource_views:
                if self.data['title'] == resource_view['title']:
                    self.old_data = self.data
                    self.data = resource_view.data
                    update = True
                    break
    if update:
        if log:
            logger.warning('resource view exists. Updating %s' % self.data['id'])
        self._merge_hdx_update('resource view', 'id')
    return update
python
def _update_resource_view(self, log=False):
    # type: () -> bool
    """Check if resource view exists in HDX and if so, update resource view

    Returns:
        bool: True if updated and False if not
    """
    update = False
    if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']):
        update = True
    else:
        if 'resource_id' in self.data:
            resource_views = self.get_all_for_resource(self.data['resource_id'])
            for resource_view in resource_views:
                if self.data['title'] == resource_view['title']:
                    self.old_data = self.data
                    self.data = resource_view.data
                    update = True
                    break
    if update:
        if log:
            logger.warning('resource view exists. Updating %s' % self.data['id'])
        self._merge_hdx_update('resource view', 'id')
    return update
['def', '_update_resource_view', '(', 'self', ',', 'log', '=', 'False', ')', ':', '# type: () -> bool', 'update', '=', 'False', 'if', "'id'", 'in', 'self', '.', 'data', 'and', 'self', '.', '_load_from_hdx', '(', "'resource view'", ',', 'self', '.', 'data', '[', "'id'", ']', ')', ':', 'update', '=', 'True', 'else', ':', 'if', "'resource_id'", 'in', 'self', '.', 'data', ':', 'resource_views', '=', 'self', '.', 'get_all_for_resource', '(', 'self', '.', 'data', '[', "'resource_id'", ']', ')', 'for', 'resource_view', 'in', 'resource_views', ':', 'if', 'self', '.', 'data', '[', "'title'", ']', '==', 'resource_view', '[', "'title'", ']', ':', 'self', '.', 'old_data', '=', 'self', '.', 'data', 'self', '.', 'data', '=', 'resource_view', '.', 'data', 'update', '=', 'True', 'break', 'if', 'update', ':', 'if', 'log', ':', 'logger', '.', 'warning', '(', "'resource view exists. Updating %s'", '%', 'self', '.', 'data', '[', "'id'", ']', ')', 'self', '.', '_merge_hdx_update', '(', "'resource view'", ',', "'id'", ')', 'return', 'update']
Check if resource view exists in HDX and if so, update resource view

Returns:
    bool: True if updated and False if not
['Check', 'if', 'resource', 'view', 'exists', 'in', 'HDX', 'and', 'if', 'so', 'update', 'resource', 'view']
train
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/resource_view.py#L124-L147
1,084
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_wp.py
WPModule.wp_status
def wp_status(self):
    '''show status of wp download'''
    try:
        print("Have %u of %u waypoints" % (self.wploader.count()+len(self.wp_received), self.wploader.expected_count))
    except Exception:
        print("Have %u waypoints" % (self.wploader.count()+len(self.wp_received)))
python
def wp_status(self):
    '''show status of wp download'''
    try:
        print("Have %u of %u waypoints" % (self.wploader.count()+len(self.wp_received), self.wploader.expected_count))
    except Exception:
        print("Have %u waypoints" % (self.wploader.count()+len(self.wp_received)))
['def', 'wp_status', '(', 'self', ')', ':', 'try', ':', 'print', '(', '"Have %u of %u waypoints"', '%', '(', 'self', '.', 'wploader', '.', 'count', '(', ')', '+', 'len', '(', 'self', '.', 'wp_received', ')', ',', 'self', '.', 'wploader', '.', 'expected_count', ')', ')', 'except', 'Exception', ':', 'print', '(', '"Have %u waypoints"', '%', '(', 'self', '.', 'wploader', '.', 'count', '(', ')', '+', 'len', '(', 'self', '.', 'wp_received', ')', ')', ')']
show status of wp download
['show', 'status', 'of', 'wp', 'download']
train
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_wp.py#L89-L94
1,085
push-things/django-th
th_evernote/my_evernote.py
ServiceEvernote.auth
def auth(self, request):
    """
        let's auth the user to the Service
    """
    client = self.get_evernote_client()
    request_token = client.get_request_token(self.callback_url(request))

    # Save the request token information for later
    request.session['oauth_token'] = request_token['oauth_token']
    request.session['oauth_token_secret'] = request_token['oauth_token_secret']

    # Redirect the user to the Evernote authorization URL
    # return the URL string which will be used by redirect()
    # from the calling func
    return client.get_authorize_url(request_token)
python
def auth(self, request):
    """
        let's auth the user to the Service
    """
    client = self.get_evernote_client()
    request_token = client.get_request_token(self.callback_url(request))

    # Save the request token information for later
    request.session['oauth_token'] = request_token['oauth_token']
    request.session['oauth_token_secret'] = request_token['oauth_token_secret']

    # Redirect the user to the Evernote authorization URL
    # return the URL string which will be used by redirect()
    # from the calling func
    return client.get_authorize_url(request_token)
['def', 'auth', '(', 'self', ',', 'request', ')', ':', 'client', '=', 'self', '.', 'get_evernote_client', '(', ')', 'request_token', '=', 'client', '.', 'get_request_token', '(', 'self', '.', 'callback_url', '(', 'request', ')', ')', '# Save the request token information for later', 'request', '.', 'session', '[', "'oauth_token'", ']', '=', 'request_token', '[', "'oauth_token'", ']', 'request', '.', 'session', '[', "'oauth_token_secret'", ']', '=', 'request_token', '[', "'oauth_token_secret'", ']', '# Redirect the user to the Evernote authorization URL', '# return the URL string which will be used by redirect()', '# from the calling func', 'return', 'client', '.', 'get_authorize_url', '(', 'request_token', ')']
let's auth the user to the Service
['let', 's', 'auth', 'the', 'user', 'to', 'the', 'Service']
train
https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_evernote/my_evernote.py#L275-L287
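A hedged sketch of the surrounding OAuth 1.0a dance, written against the Evernote SDK directly; the consumer key, secret and callback URL are placeholders, and the final exchange happens in a separate callback view.

# hedged OAuth 1.0a flow sketch; credentials and URLs are placeholders
from evernote.api.client import EvernoteClient

client = EvernoteClient(consumer_key='key', consumer_secret='secret',
                        sandbox=True)
request_token = client.get_request_token('https://example.com/callback')
authorize_url = client.get_authorize_url(request_token)
# redirect the user to authorize_url; the callback view then exchanges
# the stored token pair plus oauth_verifier for an access token:
# client.get_access_token(oauth_token, oauth_token_secret, oauth_verifier)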
1,086
dbrgn/RPLCD
RPLCD/lcd.py
BaseCharLCD.create_char
def create_char(self, location, bitmap):
    """Create a new character.

    The HD44780 supports up to 8 custom characters (location 0-7).

    :param location: The place in memory where the character is stored.
        Values need to be integers between 0 and 7.
    :type location: int
    :param bitmap: The bitmap containing the character. This should be a
        tuple of 8 numbers, each representing a 5 pixel row.
    :type bitmap: tuple of int
    :raises AssertionError: Raised when an invalid location is passed in or
        when bitmap has an incorrect size.

    Example:

    .. sourcecode:: python

        >>> smiley = (
        ...     0b00000,
        ...     0b01010,
        ...     0b01010,
        ...     0b00000,
        ...     0b10001,
        ...     0b10001,
        ...     0b01110,
        ...     0b00000,
        ... )
        >>> lcd.create_char(0, smiley)

    """
    assert 0 <= location <= 7, 'Only locations 0-7 are valid.'
    assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'

    # Store previous position
    pos = self.cursor_pos

    # Write character to CGRAM
    self.command(c.LCD_SETCGRAMADDR | location << 3)
    for row in bitmap:
        self._send_data(row)

    # Restore cursor pos
    self.cursor_pos = pos
python
def create_char(self, location, bitmap):
    """Create a new character.

    The HD44780 supports up to 8 custom characters (location 0-7).

    :param location: The place in memory where the character is stored.
        Values need to be integers between 0 and 7.
    :type location: int
    :param bitmap: The bitmap containing the character. This should be a
        tuple of 8 numbers, each representing a 5 pixel row.
    :type bitmap: tuple of int
    :raises AssertionError: Raised when an invalid location is passed in or
        when bitmap has an incorrect size.

    Example:

    .. sourcecode:: python

        >>> smiley = (
        ...     0b00000,
        ...     0b01010,
        ...     0b01010,
        ...     0b00000,
        ...     0b10001,
        ...     0b10001,
        ...     0b01110,
        ...     0b00000,
        ... )
        >>> lcd.create_char(0, smiley)

    """
    assert 0 <= location <= 7, 'Only locations 0-7 are valid.'
    assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'

    # Store previous position
    pos = self.cursor_pos

    # Write character to CGRAM
    self.command(c.LCD_SETCGRAMADDR | location << 3)
    for row in bitmap:
        self._send_data(row)

    # Restore cursor pos
    self.cursor_pos = pos
['def', 'create_char', '(', 'self', ',', 'location', ',', 'bitmap', ')', ':', 'assert', '0', '<=', 'location', '<=', '7', ',', "'Only locations 0-7 are valid.'", 'assert', 'len', '(', 'bitmap', ')', '==', '8', ',', "'Bitmap should have exactly 8 rows.'", '# Store previous position', 'pos', '=', 'self', '.', 'cursor_pos', '# Write character to CGRAM', 'self', '.', 'command', '(', 'c', '.', 'LCD_SETCGRAMADDR', '|', 'location', '<<', '3', ')', 'for', 'row', 'in', 'bitmap', ':', 'self', '.', '_send_data', '(', 'row', ')', '# Restore cursor pos', 'self', '.', 'cursor_pos', '=', 'pos']
Create a new character.

The HD44780 supports up to 8 custom characters (location 0-7).

:param location: The place in memory where the character is stored.
    Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
    tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
    when bitmap has an incorrect size.

Example:

.. sourcecode:: python

    >>> smiley = (
    ...     0b00000,
    ...     0b01010,
    ...     0b01010,
    ...     0b00000,
    ...     0b10001,
    ...     0b10001,
    ...     0b01110,
    ...     0b00000,
    ... )
    >>> lcd.create_char(0, smiley)
['Create', 'a', 'new', 'character', '.']
train
https://github.com/dbrgn/RPLCD/blob/95fe5da1354d466d661cdc84e1637ce557700c8c/RPLCD/lcd.py#L332-L375
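A note on the `location << 3` above: each of the eight CGRAM slots holds eight bytes (one per pixel row), so shifting the slot number left by three selects that slot's base address. A small arithmetic sketch, assuming the standard HD44780 set-CGRAM command value of 0x40:

# CGRAM addressing sketch; 0x40 is the standard HD44780 set-CGRAM command
LCD_SETCGRAMADDR = 0x40
for location in range(8):
    base = LCD_SETCGRAMADDR | location << 3  # 8 bytes per character slot
    print(location, hex(base))               # 0 -> 0x40, 1 -> 0x48, ...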
1,087
ArchiveTeam/wpull
wpull/network/connection.py
BaseConnection.closed
def closed(self) -> bool:
    '''Return whether the connection is closed.'''
    return not self.writer or not self.reader or self.reader.at_eof()
python
def closed(self) -> bool:
    '''Return whether the connection is closed.'''
    return not self.writer or not self.reader or self.reader.at_eof()
['def', 'closed', '(', 'self', ')', '->', 'bool', ':', 'return', 'not', 'self', '.', 'writer', 'or', 'not', 'self', '.', 'reader', 'or', 'self', '.', 'reader', '.', 'at_eof', '(', ')']
Return whether the connection is closed.
['Return', 'whether', 'the', 'connection', 'is', 'closed', '.']
train
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/connection.py#L161-L163
1,088
wummel/patool
patoolib/programs/star.py
create_tar
def create_tar (archive, compression, cmd, verbosity, interactive, filenames):
    """Create a TAR archive."""
    cmdlist = [cmd, '-c']
    add_star_opts(cmdlist, compression, verbosity)
    cmdlist.append("file=%s" % archive)
    cmdlist.extend(filenames)
    return cmdlist
python
def create_tar (archive, compression, cmd, verbosity, interactive, filenames):
    """Create a TAR archive."""
    cmdlist = [cmd, '-c']
    add_star_opts(cmdlist, compression, verbosity)
    cmdlist.append("file=%s" % archive)
    cmdlist.extend(filenames)
    return cmdlist
['def', 'create_tar', '(', 'archive', ',', 'compression', ',', 'cmd', ',', 'verbosity', ',', 'interactive', ',', 'filenames', ')', ':', 'cmdlist', '=', '[', 'cmd', ',', "'-c'", ']', 'add_star_opts', '(', 'cmdlist', ',', 'compression', ',', 'verbosity', ')', 'cmdlist', '.', 'append', '(', '"file=%s"', '%', 'archive', ')', 'cmdlist', '.', 'extend', '(', 'filenames', ')', 'return', 'cmdlist']
Create a TAR archive.
['Create', 'a', 'TAR', 'archive', '.']
train
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/star.py#L35-L41
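An illustrative call showing the shape of the returned command list; the flags inserted by `add_star_opts()` depend on compression and verbosity and are not shown in this record, so the middle of the list is approximate.

# hedged usage sketch; exact flags from add_star_opts() are not shown here
cmdlist = create_tar('backup.tar.gz', 'gzip', 'star', 0, False,
                     ['etc/', 'home/user/'])
# -> roughly ['star', '-c', <compression/verbosity flags...>,
#             'file=backup.tar.gz', 'etc/', 'home/user/']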
1,089
Nukesor/pueue
pueue/daemon/queue.py
Queue.write
def write(self):
    """Write the current queue to a file. We need this to continue an earlier session."""
    queue_path = os.path.join(self.config_dir, 'queue')
    queue_file = open(queue_path, 'wb+')
    try:
        pickle.dump(self.queue, queue_file, -1)
    except Exception:
        print('Error while writing to queue file. Wrong file permissions?')

    queue_file.close()
python
def write(self):
    """Write the current queue to a file. We need this to continue an earlier session."""
    queue_path = os.path.join(self.config_dir, 'queue')
    queue_file = open(queue_path, 'wb+')
    try:
        pickle.dump(self.queue, queue_file, -1)
    except Exception:
        print('Error while writing to queue file. Wrong file permissions?')

    queue_file.close()
['def', 'write', '(', 'self', ')', ':', 'queue_path', '=', 'os', '.', 'path', '.', 'join', '(', 'self', '.', 'config_dir', ',', "'queue'", ')', 'queue_file', '=', 'open', '(', 'queue_path', ',', "'wb+'", ')', 'try', ':', 'pickle', '.', 'dump', '(', 'self', '.', 'queue', ',', 'queue_file', ',', '-', '1', ')', 'except', 'Exception', ':', 'print', '(', "'Error while writing to queue file. Wrong file permissions?'", ')', 'queue_file', '.', 'close', '(', ')']
Write the current queue to a file. We need this to continue an earlier session.
['Write', 'the', 'current', 'queue', 'to', 'a', 'file', '.', 'We', 'need', 'this', 'to', 'continue', 'an', 'earlier', 'session', '.']
train
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/queue.py#L108-L116
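A minimal alternative sketch, not the project's code: a context manager guarantees the file handle is closed even if pickling raises, which the bare `open`/`close` pattern above only achieves because `close()` sits after the `try` block.

# standalone alternative sketch (names are illustrative)
import os
import pickle

def write_queue(queue, config_dir):
    queue_path = os.path.join(config_dir, 'queue')
    try:
        with open(queue_path, 'wb') as queue_file:
            pickle.dump(queue, queue_file, -1)  # -1 = highest protocol
    except OSError:
        print('Error while writing to queue file. Wrong file permissions?')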
1,090
Jaymon/endpoints
endpoints/interface/uwsgi/client.py
WebsocketClient.connect
def connect(self, path="", headers=None, query=None, timeout=0, **kwargs):
    """
    make the actual connection to the websocket

    :param headers: dict, key/val pairs of any headers to add to connection,
        if you would like to override headers just pass in an empty value
    :param query: dict, any query string params you want to send up with the connection url
    :returns: Payload, this will return the CONNECT response from the websocket
    """
    ret = None
    ws_url = self.get_fetch_url(path, query)
    ws_headers = self.get_fetch_headers("GET", headers)
    ws_headers = ['{}: {}'.format(h[0], h[1]) for h in ws_headers.items() if h[1]]
    timeout = self.get_timeout(timeout=timeout, **kwargs)
    self.set_trace(kwargs.pop("trace", False))

    #pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers)

    try:
        logger.debug("{} connecting to {}".format(self.client_id, ws_url))
        self.ws = websocket.create_connection(
            ws_url,
            header=ws_headers,
            timeout=timeout,
            sslopt={'cert_reqs':ssl.CERT_NONE},
        )

        ret = self.recv_callback(callback=lambda r: r.uuid == "CONNECT")
        if ret.code >= 400:
            raise IOError("Failed to connect with code {}".format(ret.code))

        # self.headers = headers
        # self.query_kwargs = query_kwargs

    except websocket.WebSocketTimeoutException:
        raise IOError("Failed to connect within {} seconds".format(timeout))

    except websocket.WebSocketException as e:
        raise IOError("Failed to connect with error: {}".format(e))

    except socket.error as e:
        # this is an IOError, I just wanted to be aware of that, most common
        # problem is: [Errno 111] Connection refused
        raise

    return ret
python
def connect(self, path="", headers=None, query=None, timeout=0, **kwargs):
    """
    make the actual connection to the websocket

    :param headers: dict, key/val pairs of any headers to add to connection,
        if you would like to override headers just pass in an empty value
    :param query: dict, any query string params you want to send up with the connection url
    :returns: Payload, this will return the CONNECT response from the websocket
    """
    ret = None
    ws_url = self.get_fetch_url(path, query)
    ws_headers = self.get_fetch_headers("GET", headers)
    ws_headers = ['{}: {}'.format(h[0], h[1]) for h in ws_headers.items() if h[1]]
    timeout = self.get_timeout(timeout=timeout, **kwargs)
    self.set_trace(kwargs.pop("trace", False))

    #pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers)

    try:
        logger.debug("{} connecting to {}".format(self.client_id, ws_url))
        self.ws = websocket.create_connection(
            ws_url,
            header=ws_headers,
            timeout=timeout,
            sslopt={'cert_reqs':ssl.CERT_NONE},
        )

        ret = self.recv_callback(callback=lambda r: r.uuid == "CONNECT")
        if ret.code >= 400:
            raise IOError("Failed to connect with code {}".format(ret.code))

        # self.headers = headers
        # self.query_kwargs = query_kwargs

    except websocket.WebSocketTimeoutException:
        raise IOError("Failed to connect within {} seconds".format(timeout))

    except websocket.WebSocketException as e:
        raise IOError("Failed to connect with error: {}".format(e))

    except socket.error as e:
        # this is an IOError, I just wanted to be aware of that, most common
        # problem is: [Errno 111] Connection refused
        raise

    return ret
['def', 'connect', '(', 'self', ',', 'path', '=', '""', ',', 'headers', '=', 'None', ',', 'query', '=', 'None', ',', 'timeout', '=', '0', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', 'None', 'ws_url', '=', 'self', '.', 'get_fetch_url', '(', 'path', ',', 'query', ')', 'ws_headers', '=', 'self', '.', 'get_fetch_headers', '(', '"GET"', ',', 'headers', ')', 'ws_headers', '=', '[', "'{}: {}'", '.', 'format', '(', 'h', '[', '0', ']', ',', 'h', '[', '1', ']', ')', 'for', 'h', 'in', 'ws_headers', '.', 'items', '(', ')', 'if', 'h', '[', '1', ']', ']', 'timeout', '=', 'self', '.', 'get_timeout', '(', 'timeout', '=', 'timeout', ',', '*', '*', 'kwargs', ')', 'self', '.', 'set_trace', '(', 'kwargs', '.', 'pop', '(', '"trace"', ',', 'False', ')', ')', '#pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers)', 'try', ':', 'logger', '.', 'debug', '(', '"{} connecting to {}"', '.', 'format', '(', 'self', '.', 'client_id', ',', 'ws_url', ')', ')', 'self', '.', 'ws', '=', 'websocket', '.', 'create_connection', '(', 'ws_url', ',', 'header', '=', 'ws_headers', ',', 'timeout', '=', 'timeout', ',', 'sslopt', '=', '{', "'cert_reqs'", ':', 'ssl', '.', 'CERT_NONE', '}', ',', ')', 'ret', '=', 'self', '.', 'recv_callback', '(', 'callback', '=', 'lambda', 'r', ':', 'r', '.', 'uuid', '==', '"CONNECT"', ')', 'if', 'ret', '.', 'code', '>=', '400', ':', 'raise', 'IOError', '(', '"Failed to connect with code {}"', '.', 'format', '(', 'ret', '.', 'code', ')', ')', '# self.headers = headers', '# self.query_kwargs = query_kwargs', 'except', 'websocket', '.', 'WebSocketTimeoutException', ':', 'raise', 'IOError', '(', '"Failed to connect within {} seconds"', '.', 'format', '(', 'timeout', ')', ')', 'except', 'websocket', '.', 'WebSocketException', 'as', 'e', ':', 'raise', 'IOError', '(', '"Failed to connect with error: {}"', '.', 'format', '(', 'e', ')', ')', 'except', 'socket', '.', 'error', 'as', 'e', ':', '# this is an IOError, I just wanted to be aware of that, most common', '# problem is: [Errno 111] Connection refused', 'raise', 'return', 'ret']
make the actual connection to the websocket

:param headers: dict, key/val pairs of any headers to add to connection,
    if you would like to override headers just pass in an empty value
:param query: dict, any query string params you want to send up with the connection url
:returns: Payload, this will return the CONNECT response from the websocket
['make', 'the', 'actual', 'connection', 'to', 'the', 'websocket']
train
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/interface/uwsgi/client.py#L105-L151
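A hypothetical caller sketch; the `WebsocketClient` constructor arguments, path, header and query values are all assumptions, since only the `connect()` method appears in this record.

# hedged usage sketch; constructor arguments are assumed
client = WebsocketClient('example.com:8000')
try:
    connect_payload = client.connect(
        path='/chat',
        headers={'Authorization': 'Bearer <token>'},
        query={'room': 'general'},
        timeout=5,
    )
    print('connected with code', connect_payload.code)
except IOError as e:
    print('websocket handshake failed:', e)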
1,091
dinoboff/schemabuilder
src/schemabuilder/schema.py
Ref.validate
def validate(self, data):
    """Validate the data against the schema.

    """
    validator = self._schema.validator(self._id)
    validator.validate(data)
python
def validate(self, data):
    """Validate the data against the schema.

    """
    validator = self._schema.validator(self._id)
    validator.validate(data)
['def', 'validate', '(', 'self', ',', 'data', ')', ':', 'validator', '=', 'self', '.', '_schema', '.', 'validator', '(', 'self', '.', '_id', ')', 'validator', '.', 'validate', '(', 'data', ')']
Validate the data against the schema.
['Validate', 'the', 'data', 'against', 'the', 'schema', '.']
train
https://github.com/dinoboff/schemabuilder/blob/9b9f3de2528836ad069f458d3d68b1b5f4efbe94/src/schemabuilder/schema.py#L93-L98
1,092
cisco-sas/kitty
examples/05_fuzz_with_session/session_server.py
TCPServer.shutdown_request
def shutdown_request(self, request):
    """
    Called to shutdown and close an individual request.
    """
    try:
        request.shutdown(socket.SHUT_WR)
    except socket.error:
        pass
    self.close_request(request)
python
def shutdown_request(self, request):
    """
    Called to shutdown and close an individual request.
    """
    try:
        request.shutdown(socket.SHUT_WR)
    except socket.error:
        pass
    self.close_request(request)
['def', 'shutdown_request', '(', 'self', ',', 'request', ')', ':', 'try', ':', 'request', '.', 'shutdown', '(', 'socket', '.', 'SHUT_WR', ')', 'except', 'socket', '.', 'error', ':', 'pass', 'self', '.', 'close_request', '(', 'request', ')']
Called to shutdown and close an individual request.
['Called', 'to', 'shutdown', 'and', 'close', 'an', 'individual', 'request', '.']
train
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/examples/05_fuzz_with_session/session_server.py#L337-L347
1,093
learningequality/ricecooker
ricecooker/sushi_bar_client.py
ReconnectingWebSocket.run
def run(self):
    """
    If the connection drops, then run_forever will terminate and
    a reconnection attempt will be made.
    """
    while True:
        self.connect_lock.acquire()
        if self.stopped():
            return
        self.__connect()
        self.connect_lock.release()
        self.ws.run_forever()
python
def run(self):
    """
    If the connection drops, then run_forever will terminate and
    a reconnection attempt will be made.
    """
    while True:
        self.connect_lock.acquire()
        if self.stopped():
            return
        self.__connect()
        self.connect_lock.release()
        self.ws.run_forever()
['def', 'run', '(', 'self', ')', ':', 'while', 'True', ':', 'self', '.', 'connect_lock', '.', 'acquire', '(', ')', 'if', 'self', '.', 'stopped', '(', ')', ':', 'return', 'self', '.', '__connect', '(', ')', 'self', '.', 'connect_lock', '.', 'release', '(', ')', 'self', '.', 'ws', '.', 'run_forever', '(', ')']
If the connection drops, then run_forever will terminate and a reconnection attempt will be made.
['If', 'the', 'connection', 'drops', 'then', 'run_forever', 'will', 'terminate', 'and', 'a', 'reconnection', 'attempt', 'will', 'be', 'made', '.']
train
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/sushi_bar_client.py#L38-L49
1,094
openstack/proliantutils
proliantutils/redfish/resources/system/storage/storage.py
Storage.volumes
def volumes(self):
    """This property prepares the list of volumes

    :return a list of volumes.
    """
    return sys_volumes.VolumeCollection(
        self._conn, utils.get_subresource_path_by(self, 'Volumes'),
        redfish_version=self.redfish_version)
python
def volumes(self):
    """This property prepares the list of volumes

    :return a list of volumes.
    """
    return sys_volumes.VolumeCollection(
        self._conn, utils.get_subresource_path_by(self, 'Volumes'),
        redfish_version=self.redfish_version)
['def', 'volumes', '(', 'self', ')', ':', 'return', 'sys_volumes', '.', 'VolumeCollection', '(', 'self', '.', '_conn', ',', 'utils', '.', 'get_subresource_path_by', '(', 'self', ',', "'Volumes'", ')', ',', 'redfish_version', '=', 'self', '.', 'redfish_version', ')']
This property prepares the list of volumes

:return a list of volumes.
['This', 'property', 'prepares', 'the', 'list', 'of', 'volumes']
train
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/storage.py#L46-L53
1,095
mandiant/ioc_writer
ioc_writer/ioc_api.py
IOC.remove_parameter
def remove_parameter(self, param_id=None, name=None, ref_id=None, ):
    """
    Removes parameters based on function arguments.

    This can remove parameters based on the following param values:
    param/@id
    param/@name
    param/@ref_id

    Each input is mutually exclusive. Calling this function with multiple values set will cause an IOCParseError
    exception. Calling this function without setting one value will raise an exception.

    :param param_id: The id of the parameter to remove.
    :param name: The name of the parameter to remove.
    :param ref_id: The IndicatorItem/Indicator id of the parameter to remove.
    :return: Number of parameters removed.
    """
    l = []
    if param_id:
        l.append('param_id')
    if name:
        l.append('name')
    if ref_id:
        l.append('ref_id')
    if len(l) > 1:
        raise IOCParseError('Must specify only param_id, name or ref_id. Specified {}'.format(str(l)))
    elif len(l) < 1:
        raise IOCParseError('Must specifiy an param_id, name or ref_id to remove a paramater')

    counter = 0
    parameters_node = self.parameters

    if param_id:
        params = parameters_node.xpath('//param[@id="{}"]'.format(param_id))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    elif name:
        params = parameters_node.xpath('//param[@name="{}"]'.format(name))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    elif ref_id:
        params = parameters_node.xpath('//param[@ref-id="{}"]'.format(ref_id))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    return counter
python
def remove_parameter(self, param_id=None, name=None, ref_id=None, ):
    """
    Removes parameters based on function arguments.

    This can remove parameters based on the following param values:
    param/@id
    param/@name
    param/@ref_id

    Each input is mutually exclusive. Calling this function with multiple values set will cause an IOCParseError
    exception. Calling this function without setting one value will raise an exception.

    :param param_id: The id of the parameter to remove.
    :param name: The name of the parameter to remove.
    :param ref_id: The IndicatorItem/Indicator id of the parameter to remove.
    :return: Number of parameters removed.
    """
    l = []
    if param_id:
        l.append('param_id')
    if name:
        l.append('name')
    if ref_id:
        l.append('ref_id')
    if len(l) > 1:
        raise IOCParseError('Must specify only param_id, name or ref_id. Specified {}'.format(str(l)))
    elif len(l) < 1:
        raise IOCParseError('Must specifiy an param_id, name or ref_id to remove a paramater')

    counter = 0
    parameters_node = self.parameters

    if param_id:
        params = parameters_node.xpath('//param[@id="{}"]'.format(param_id))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    elif name:
        params = parameters_node.xpath('//param[@name="{}"]'.format(name))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    elif ref_id:
        params = parameters_node.xpath('//param[@ref-id="{}"]'.format(ref_id))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    return counter
['def', 'remove_parameter', '(', 'self', ',', 'param_id', '=', 'None', ',', 'name', '=', 'None', ',', 'ref_id', '=', 'None', ',', ')', ':', 'l', '=', '[', ']', 'if', 'param_id', ':', 'l', '.', 'append', '(', "'param_id'", ')', 'if', 'name', ':', 'l', '.', 'append', '(', "'name'", ')', 'if', 'ref_id', ':', 'l', '.', 'append', '(', "'ref_id'", ')', 'if', 'len', '(', 'l', ')', '>', '1', ':', 'raise', 'IOCParseError', '(', "'Must specify only param_id, name or ref_id. Specified {}'", '.', 'format', '(', 'str', '(', 'l', ')', ')', ')', 'elif', 'len', '(', 'l', ')', '<', '1', ':', 'raise', 'IOCParseError', '(', "'Must specifiy an param_id, name or ref_id to remove a paramater'", ')', 'counter', '=', '0', 'parameters_node', '=', 'self', '.', 'parameters', 'if', 'param_id', ':', 'params', '=', 'parameters_node', '.', 'xpath', '(', '\'//param[@id="{}"]\'', '.', 'format', '(', 'param_id', ')', ')', 'for', 'param', 'in', 'params', ':', 'parameters_node', '.', 'remove', '(', 'param', ')', 'counter', '+=', '1', 'elif', 'name', ':', 'params', '=', 'parameters_node', '.', 'xpath', '(', '\'//param[@name="{}"]\'', '.', 'format', '(', 'name', ')', ')', 'for', 'param', 'in', 'params', ':', 'parameters_node', '.', 'remove', '(', 'param', ')', 'counter', '+=', '1', 'elif', 'ref_id', ':', 'params', '=', 'parameters_node', '.', 'xpath', '(', '\'//param[@ref-id="{}"]\'', '.', 'format', '(', 'ref_id', ')', ')', 'for', 'param', 'in', 'params', ':', 'parameters_node', '.', 'remove', '(', 'param', ')', 'counter', '+=', '1', 'return', 'counter']
Removes parameters based on function arguments.

This can remove parameters based on the following param values:
param/@id
param/@name
param/@ref_id

Each input is mutually exclusive. Calling this function with multiple values set will cause an IOCParseError
exception. Calling this function without setting one value will raise an exception.

:param param_id: The id of the parameter to remove.
:param name: The name of the parameter to remove.
:param ref_id: The IndicatorItem/Indicator id of the parameter to remove.
:return: Number of parameters removed.
['Removes', 'parameters', 'based', 'on', 'function', 'arguments', '.']
train
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_api.py#L502-L549
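A usage sketch of the mutual-exclusion contract: exactly one selector per call. The `IOC` constructor argument and parameter name below are illustrative.

# hedged usage sketch; constructor argument and names are illustrative
ioc = IOC(name='example')
removed = ioc.remove_parameter(name='comment')
print('removed {} parameter(s)'.format(removed))

# mixing selectors raises IOCParseError:
# ioc.remove_parameter(param_id='42', name='comment')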
1,096
johnnoone/aioconsul
aioconsul/client/kv_endpoint.py
WriteMixin._write
async def _write(self, path, data, *,
                 flags=None, cas=None, acquire=None, release=None):
    """Sets the key to the given value.

    Returns:
        bool: ``True`` on success
    """
    if not isinstance(data, bytes):
        raise ValueError("value must be bytes")
    path = "/v1/kv/%s" % path
    response = await self._api.put(
        path,
        params={
            "flags": flags,
            "cas": cas,
            "acquire": acquire,
            "release": release
        },
        data=data,
        headers={"Content-Type": "application/octet-stream"})
    return response
python
async def _write(self, path, data, *,
                 flags=None, cas=None, acquire=None, release=None):
    """Sets the key to the given value.

    Returns:
        bool: ``True`` on success
    """
    if not isinstance(data, bytes):
        raise ValueError("value must be bytes")
    path = "/v1/kv/%s" % path
    response = await self._api.put(
        path,
        params={
            "flags": flags,
            "cas": cas,
            "acquire": acquire,
            "release": release
        },
        data=data,
        headers={"Content-Type": "application/octet-stream"})
    return response
['async', 'def', '_write', '(', 'self', ',', 'path', ',', 'data', ',', '*', ',', 'flags', '=', 'None', ',', 'cas', '=', 'None', ',', 'acquire', '=', 'None', ',', 'release', '=', 'None', ')', ':', 'if', 'not', 'isinstance', '(', 'data', ',', 'bytes', ')', ':', 'raise', 'ValueError', '(', '"value must be bytes"', ')', 'path', '=', '"/v1/kv/%s"', '%', 'path', 'response', '=', 'await', 'self', '.', '_api', '.', 'put', '(', 'path', ',', 'params', '=', '{', '"flags"', ':', 'flags', ',', '"cas"', ':', 'cas', ',', '"acquire"', ':', 'acquire', ',', '"release"', ':', 'release', '}', ',', 'data', '=', 'data', ',', 'headers', '=', '{', '"Content-Type"', ':', '"application/octet-stream"', '}', ')', 'return', 'response']
Sets the key to the given value.

Returns:
    bool: ``True`` on success
['Sets', 'the', 'key', 'to', 'the', 'given', 'value', '.']
train
https://github.com/johnnoone/aioconsul/blob/02f7a529d7dc2e49bed942111067aa5faf320e90/aioconsul/client/kv_endpoint.py#L252-L272
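A hedged sketch of a check-and-set update routed through this helper; `_write` is private and the `client.kv` attribute access is an assumption, so this only illustrates how the `cas` parameter maps onto Consul's semantics.

# hedged CAS sketch; client structure and attribute names are assumed
async def cas_update(client, key, value, index):
    # Consul applies the write only if the entry's ModifyIndex still
    # equals `index`; the coroutine returns the API response
    return await client.kv._write(key, value, cas=index)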
1,097
nicolargo/glances
glances/plugins/glances_smart.py
is_admin
def is_admin():
    """
    https://stackoverflow.com/a/19719292
    @return: True if the current user is an 'Admin' whatever that
    means (root on Unix), otherwise False.

    Warning: The inner function fails unless you have Windows XP SP2 or
    higher. The failure causes a traceback to be printed and this
    function to return False.
    """

    if os.name == 'nt':
        import ctypes
        import traceback
        # WARNING: requires Windows XP SP2 or higher!
        try:
            return ctypes.windll.shell32.IsUserAnAdmin()
        except:
            traceback.print_exc()
            return False
    else:
        # Check for root on Posix
        return os.getuid() == 0
python
def is_admin():
    """
    https://stackoverflow.com/a/19719292
    @return: True if the current user is an 'Admin' whatever that
    means (root on Unix), otherwise False.

    Warning: The inner function fails unless you have Windows XP SP2 or
    higher. The failure causes a traceback to be printed and this
    function to return False.
    """

    if os.name == 'nt':
        import ctypes
        import traceback
        # WARNING: requires Windows XP SP2 or higher!
        try:
            return ctypes.windll.shell32.IsUserAnAdmin()
        except:
            traceback.print_exc()
            return False
    else:
        # Check for root on Posix
        return os.getuid() == 0
['def', 'is_admin', '(', ')', ':', 'if', 'os', '.', 'name', '==', "'nt'", ':', 'import', 'ctypes', 'import', 'traceback', '# WARNING: requires Windows XP SP2 or higher!', 'try', ':', 'return', 'ctypes', '.', 'windll', '.', 'shell32', '.', 'IsUserAnAdmin', '(', ')', 'except', ':', 'traceback', '.', 'print_exc', '(', ')', 'return', 'False', 'else', ':', '# Check for root on Posix', 'return', 'os', '.', 'getuid', '(', ')', '==', '0']
https://stackoverflow.com/a/19719292

@return: True if the current user is an 'Admin' whatever that
means (root on Unix), otherwise False.

Warning: The inner function fails unless you have Windows XP SP2 or
higher. The failure causes a traceback to be printed and this
function to return False.
['https', ':', '//', 'stackoverflow', '.', 'com', '/', 'a', '/', '19719292']
train
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_smart.py#L64-L85
1,098
libyal/dtfabric
dtfabric/runtime/data_maps.py
StructureMap._LinearFoldByteStream
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs):
    """Folds the data type into a byte stream.

    Args:
      mapped_value (object): mapped value.

    Returns:
      bytes: byte stream.

    Raises:
      FoldingError: if the data type definition cannot be folded into
          the byte stream.
    """
    try:
      attribute_values = [
          getattr(mapped_value, attribute_name, None)
          for attribute_name in self._attribute_names]
      attribute_values = [
          value for value in attribute_values if value is not None]
      return self._operation.WriteTo(tuple(attribute_values))

    except Exception as exception:
      error_string = (
          'Unable to write: {0:s} to byte stream with error: {1!s}').format(
              self._data_type_definition.name, exception)
      raise errors.FoldingError(error_string)
python
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs):
    """Folds the data type into a byte stream.

    Args:
      mapped_value (object): mapped value.

    Returns:
      bytes: byte stream.

    Raises:
      FoldingError: if the data type definition cannot be folded into
          the byte stream.
    """
    try:
      attribute_values = [
          getattr(mapped_value, attribute_name, None)
          for attribute_name in self._attribute_names]
      attribute_values = [
          value for value in attribute_values if value is not None]
      return self._operation.WriteTo(tuple(attribute_values))

    except Exception as exception:
      error_string = (
          'Unable to write: {0:s} to byte stream with error: {1!s}').format(
              self._data_type_definition.name, exception)
      raise errors.FoldingError(error_string)
['def', '_LinearFoldByteStream', '(', 'self', ',', 'mapped_value', ',', '*', '*', 'unused_kwargs', ')', ':', 'try', ':', 'attribute_values', '=', '[', 'getattr', '(', 'mapped_value', ',', 'attribute_name', ',', 'None', ')', 'for', 'attribute_name', 'in', 'self', '.', '_attribute_names', ']', 'attribute_values', '=', '[', 'value', 'for', 'value', 'in', 'attribute_values', 'if', 'value', 'is', 'not', 'None', ']', 'return', 'self', '.', '_operation', '.', 'WriteTo', '(', 'tuple', '(', 'attribute_values', ')', ')', 'except', 'Exception', 'as', 'exception', ':', 'error_string', '=', '(', "'Unable to write: {0:s} to byte stream with error: {1!s}'", ')', '.', 'format', '(', 'self', '.', '_data_type_definition', '.', 'name', ',', 'exception', ')', 'raise', 'errors', '.', 'FoldingError', '(', 'error_string', ')']
Folds the data type into a byte stream.

Args:
  mapped_value (object): mapped value.

Returns:
  bytes: byte stream.

Raises:
  FoldingError: if the data type definition cannot be folded into
      the byte stream.
['Folds', 'the', 'data', 'type', 'into', 'a', 'byte', 'stream', '.']
train
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/runtime/data_maps.py#L1773-L1798
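An illustrative reduction of the folding step above to plain struct packing: attribute values are gathered in declaration order, then written out. The `Point` class and format string are hypothetical stand-ins for a dtfabric data-type definition with members x and y.

# standalone sketch of "folding" attribute values into bytes
import struct

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

attribute_names = ['x', 'y']  # order comes from the data-type definition
values = tuple(getattr(Point(3, 4), name) for name in attribute_names)
byte_stream = struct.pack('<ii', *values)  # little-endian 32-bit ints
print(byte_stream)  # b'\x03\x00\x00\x00\x04\x00\x00\x00'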
1,099
quantumlib/Cirq
cirq/google/sim/xmon_simulator.py
XmonSimulator._base_iterator
def _base_iterator(
        self,
        circuit: circuits.Circuit,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Union[int, np.ndarray],
        perform_measurements: bool=True,
) -> Iterator['XmonStepResult']:
    """See definition in `cirq.SimulatesIntermediateState`.

    If the initial state is an int, the state is set to the computational
    basis state corresponding to this state. Otherwise if the initial
    state is a np.ndarray it is the full initial state. In this case it
    must be the correct size, be normalized (an L2 norm of 1), and
    be safely castable to an appropriate dtype for the simulator.
    """
    qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
        circuit.all_qubits())
    qubit_map = {q: i for i, q in enumerate(reversed(qubits))}
    if isinstance(initial_state, np.ndarray):
        initial_state = initial_state.astype(dtype=np.complex64,
                                             casting='safe')

    with xmon_stepper.Stepper(
            num_qubits=len(qubits),
            num_prefix_qubits=self.options.num_prefix_qubits,
            initial_state=initial_state,
            min_qubits_before_shard=self.options.min_qubits_before_shard,
            use_processes=self.options.use_processes
    ) as stepper:
        if len(circuit) == 0:
            yield XmonStepResult(stepper, qubit_map, {})
        for moment in circuit:
            measurements = collections.defaultdict(
                list)  # type: Dict[str, List[bool]]
            phase_map = {}  # type: Dict[Tuple[int, ...], float]
            for op in moment.operations:
                gate = cast(ops.GateOperation, op).gate
                if isinstance(gate, ops.ZPowGate):
                    index = qubit_map[op.qubits[0]]
                    phase_map[(index,)] = cast(float, gate.exponent)
                elif isinstance(gate, ops.CZPowGate):
                    index0 = qubit_map[op.qubits[0]]
                    index1 = qubit_map[op.qubits[1]]
                    phase_map[(index0, index1)] = cast(float, gate.exponent)
                elif isinstance(gate, ops.XPowGate):
                    index = qubit_map[op.qubits[0]]
                    stepper.simulate_w(
                        index=index,
                        half_turns=gate.exponent,
                        axis_half_turns=0)
                elif isinstance(gate, ops.YPowGate):
                    index = qubit_map[op.qubits[0]]
                    stepper.simulate_w(
                        index=index,
                        half_turns=gate.exponent,
                        axis_half_turns=0.5)
                elif isinstance(gate, ops.PhasedXPowGate):
                    index = qubit_map[op.qubits[0]]
                    stepper.simulate_w(
                        index=index,
                        half_turns=gate.exponent,
                        axis_half_turns=gate.phase_exponent)
                elif isinstance(gate, ops.MeasurementGate):
                    if perform_measurements:
                        invert_mask = (
                            gate.invert_mask or len(op.qubits) * (False,))
                        for qubit, invert in zip(op.qubits, invert_mask):
                            index = qubit_map[qubit]
                            result = stepper.simulate_measurement(index)
                            if invert:
                                result = not result
                            key = protocols.measurement_key(gate)
                            measurements[key].append(result)
                else:
                    # coverage: ignore
                    raise TypeError('{!r} is not supported by the '
                                    'xmon simulator.'.format(gate))
            stepper.simulate_phases(phase_map)
            yield XmonStepResult(stepper, qubit_map, measurements)
python
def _base_iterator(
        self,
        circuit: circuits.Circuit,
        qubit_order: ops.QubitOrderOrList,
        initial_state: Union[int, np.ndarray],
        perform_measurements: bool=True,
) -> Iterator['XmonStepResult']:
    """See definition in `cirq.SimulatesIntermediateState`.

    If the initial state is an int, the state is set to the computational
    basis state corresponding to this state. Otherwise if the initial
    state is a np.ndarray it is the full initial state. In this case it
    must be the correct size, be normalized (an L2 norm of 1), and
    be safely castable to an appropriate dtype for the simulator.
    """
    qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for(
        circuit.all_qubits())
    qubit_map = {q: i for i, q in enumerate(reversed(qubits))}
    if isinstance(initial_state, np.ndarray):
        initial_state = initial_state.astype(dtype=np.complex64,
                                             casting='safe')

    with xmon_stepper.Stepper(
            num_qubits=len(qubits),
            num_prefix_qubits=self.options.num_prefix_qubits,
            initial_state=initial_state,
            min_qubits_before_shard=self.options.min_qubits_before_shard,
            use_processes=self.options.use_processes
    ) as stepper:
        if len(circuit) == 0:
            yield XmonStepResult(stepper, qubit_map, {})
        for moment in circuit:
            measurements = collections.defaultdict(
                list)  # type: Dict[str, List[bool]]
            phase_map = {}  # type: Dict[Tuple[int, ...], float]
            for op in moment.operations:
                gate = cast(ops.GateOperation, op).gate
                if isinstance(gate, ops.ZPowGate):
                    index = qubit_map[op.qubits[0]]
                    phase_map[(index,)] = cast(float, gate.exponent)
                elif isinstance(gate, ops.CZPowGate):
                    index0 = qubit_map[op.qubits[0]]
                    index1 = qubit_map[op.qubits[1]]
                    phase_map[(index0, index1)] = cast(float, gate.exponent)
                elif isinstance(gate, ops.XPowGate):
                    index = qubit_map[op.qubits[0]]
                    stepper.simulate_w(
                        index=index,
                        half_turns=gate.exponent,
                        axis_half_turns=0)
                elif isinstance(gate, ops.YPowGate):
                    index = qubit_map[op.qubits[0]]
                    stepper.simulate_w(
                        index=index,
                        half_turns=gate.exponent,
                        axis_half_turns=0.5)
                elif isinstance(gate, ops.PhasedXPowGate):
                    index = qubit_map[op.qubits[0]]
                    stepper.simulate_w(
                        index=index,
                        half_turns=gate.exponent,
                        axis_half_turns=gate.phase_exponent)
                elif isinstance(gate, ops.MeasurementGate):
                    if perform_measurements:
                        invert_mask = (
                            gate.invert_mask or len(op.qubits) * (False,))
                        for qubit, invert in zip(op.qubits, invert_mask):
                            index = qubit_map[qubit]
                            result = stepper.simulate_measurement(index)
                            if invert:
                                result = not result
                            key = protocols.measurement_key(gate)
                            measurements[key].append(result)
                else:
                    # coverage: ignore
                    raise TypeError('{!r} is not supported by the '
                                    'xmon simulator.'.format(gate))
            stepper.simulate_phases(phase_map)
            yield XmonStepResult(stepper, qubit_map, measurements)
['def', '_base_iterator', '(', 'self', ',', 'circuit', ':', 'circuits', '.', 'Circuit', ',', 'qubit_order', ':', 'ops', '.', 'QubitOrderOrList', ',', 'initial_state', ':', 'Union', '[', 'int', ',', 'np', '.', 'ndarray', ']', ',', 'perform_measurements', ':', 'bool', '=', 'True', ',', ')', '->', 'Iterator', '[', "'XmonStepResult'", ']', ':', 'qubits', '=', 'ops', '.', 'QubitOrder', '.', 'as_qubit_order', '(', 'qubit_order', ')', '.', 'order_for', '(', 'circuit', '.', 'all_qubits', '(', ')', ')', 'qubit_map', '=', '{', 'q', ':', 'i', 'for', 'i', ',', 'q', 'in', 'enumerate', '(', 'reversed', '(', 'qubits', ')', ')', '}', 'if', 'isinstance', '(', 'initial_state', ',', 'np', '.', 'ndarray', ')', ':', 'initial_state', '=', 'initial_state', '.', 'astype', '(', 'dtype', '=', 'np', '.', 'complex64', ',', 'casting', '=', "'safe'", ')', 'with', 'xmon_stepper', '.', 'Stepper', '(', 'num_qubits', '=', 'len', '(', 'qubits', ')', ',', 'num_prefix_qubits', '=', 'self', '.', 'options', '.', 'num_prefix_qubits', ',', 'initial_state', '=', 'initial_state', ',', 'min_qubits_before_shard', '=', 'self', '.', 'options', '.', 'min_qubits_before_shard', ',', 'use_processes', '=', 'self', '.', 'options', '.', 'use_processes', ')', 'as', 'stepper', ':', 'if', 'len', '(', 'circuit', ')', '==', '0', ':', 'yield', 'XmonStepResult', '(', 'stepper', ',', 'qubit_map', ',', '{', '}', ')', 'for', 'moment', 'in', 'circuit', ':', 'measurements', '=', 'collections', '.', 'defaultdict', '(', 'list', ')', '# type: Dict[str, List[bool]]', 'phase_map', '=', '{', '}', '# type: Dict[Tuple[int, ...], float]', 'for', 'op', 'in', 'moment', '.', 'operations', ':', 'gate', '=', 'cast', '(', 'ops', '.', 'GateOperation', ',', 'op', ')', '.', 'gate', 'if', 'isinstance', '(', 'gate', ',', 'ops', '.', 'ZPowGate', ')', ':', 'index', '=', 'qubit_map', '[', 'op', '.', 'qubits', '[', '0', ']', ']', 'phase_map', '[', '(', 'index', ',', ')', ']', '=', 'cast', '(', 'float', ',', 'gate', '.', 'exponent', ')', 'elif', 'isinstance', '(', 'gate', ',', 'ops', '.', 'CZPowGate', ')', ':', 'index0', '=', 'qubit_map', '[', 'op', '.', 'qubits', '[', '0', ']', ']', 'index1', '=', 'qubit_map', '[', 'op', '.', 'qubits', '[', '1', ']', ']', 'phase_map', '[', '(', 'index0', ',', 'index1', ')', ']', '=', 'cast', '(', 'float', ',', 'gate', '.', 'exponent', ')', 'elif', 'isinstance', '(', 'gate', ',', 'ops', '.', 'XPowGate', ')', ':', 'index', '=', 'qubit_map', '[', 'op', '.', 'qubits', '[', '0', ']', ']', 'stepper', '.', 'simulate_w', '(', 'index', '=', 'index', ',', 'half_turns', '=', 'gate', '.', 'exponent', ',', 'axis_half_turns', '=', '0', ')', 'elif', 'isinstance', '(', 'gate', ',', 'ops', '.', 'YPowGate', ')', ':', 'index', '=', 'qubit_map', '[', 'op', '.', 'qubits', '[', '0', ']', ']', 'stepper', '.', 'simulate_w', '(', 'index', '=', 'index', ',', 'half_turns', '=', 'gate', '.', 'exponent', ',', 'axis_half_turns', '=', '0.5', ')', 'elif', 'isinstance', '(', 'gate', ',', 'ops', '.', 'PhasedXPowGate', ')', ':', 'index', '=', 'qubit_map', '[', 'op', '.', 'qubits', '[', '0', ']', ']', 'stepper', '.', 'simulate_w', '(', 'index', '=', 'index', ',', 'half_turns', '=', 'gate', '.', 'exponent', ',', 'axis_half_turns', '=', 'gate', '.', 'phase_exponent', ')', 'elif', 'isinstance', '(', 'gate', ',', 'ops', '.', 'MeasurementGate', ')', ':', 'if', 'perform_measurements', ':', 'invert_mask', '=', '(', 'gate', '.', 'invert_mask', 'or', 'len', '(', 'op', '.', 'qubits', ')', '*', '(', 'False', ',', ')', ')', 'for', 'qubit', ',', 'invert', 'in', 'zip', '(', 'op', '.', 'qubits', ',', 'invert_mask', ')', ':', 'index', '=', 'qubit_map', '[', 'qubit', ']', 'result', '=', 'stepper', '.', 'simulate_measurement', '(', 'index', ')', 'if', 'invert', ':', 'result', '=', 'not', 'result', 'key', '=', 'protocols', '.', 'measurement_key', '(', 'gate', ')', 'measurements', '[', 'key', ']', '.', 'append', '(', 'result', ')', 'else', ':', '# coverage: ignore', 'raise', 'TypeError', '(', "'{!r} is not supported by the '", "'xmon simulator.'", '.', 'format', '(', 'gate', ')', ')', 'stepper', '.', 'simulate_phases', '(', 'phase_map', ')', 'yield', 'XmonStepResult', '(', 'stepper', ',', 'qubit_map', ',', 'measurements', ')']
See definition in `cirq.SimulatesIntermediateState`.

If the initial state is an int, the state is set to the computational basis state corresponding to this state. Otherwise if the initial state is a np.ndarray it is the full initial state. In this case it must be the correct size, be normalized (an L2 norm of 1), and be safely castable to an appropriate dtype for the simulator.
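A minimal sketch of preparing an ndarray initial state that satisfies these constraints; the equal-superposition choice and the variable names are illustrative only. The key point is that astype(..., casting='safe') in the iterator rejects lossy conversions, so the array should be built as complex64 (or float32) from the start.

import numpy as np

n_qubits = 3
# Equal superposition over all 2**n basis states; L2 norm is exactly 1.
initial_state = np.full(2 ** n_qubits, 2 ** (-n_qubits / 2),
                        dtype=np.complex64)
assert np.isclose(np.linalg.norm(initial_state), 1.0)

# This mirrors the cast the iterator performs. A complex128 or float64
# array would raise TypeError here under the 'safe' casting rule.
initial_state = initial_state.astype(np.complex64, casting='safe')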
['See', 'definition', 'in', 'cirq', '.', 'SimulatesIntermediateState', '.']
train
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/xmon_simulator.py#L211-L290
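For context, `_base_iterator` backs the `simulate_moment_steps` entry point that `XmonSimulator` inherits from `cirq.SimulatesIntermediateState`. Below is a hedged usage sketch against the Cirq version at the linked commit; `cirq.google.XmonSimulator` was removed in later releases, so treat the exact names as assumptions about that era's API rather than current Cirq.

import cirq

q0, q1 = cirq.LineQubit.range(2)
circuit = cirq.Circuit([
    cirq.Moment([cirq.X(q0) ** 0.5]),
    cirq.Moment([cirq.CZ(q0, q1)]),
    cirq.Moment([cirq.measure(q0, q1, key='m')]),
])

simulator = cirq.google.XmonSimulator()
for i, step in enumerate(simulator.simulate_moment_steps(circuit)):
    # Each step is an XmonStepResult; measurement results accumulate
    # under their keys as the measurement moments execute.
    print(i, step.measurements)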