Dataset columns:
  repo              string (lengths 7 to 54)
  path              string (lengths 4 to 192)
  url               string (lengths 87 to 284)
  code              string (lengths 78 to 104k)
  code_tokens       sequence
  docstring         string (lengths 1 to 46.9k)
  docstring_tokens  sequence
  language          string (1 distinct value)
  partition         string (3 distinct values)
mar10/pyftpsync
ftpsync/targets.py
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/targets.py#L172-L176
def get_options_dict(self):
    """Return options from synchronizer (possibly overridden by own extra_opts)."""
    d = self.synchronizer.options if self.synchronizer else {}
    d.update(self.extra_opts)
    return d
[ "def", "get_options_dict", "(", "self", ")", ":", "d", "=", "self", ".", "synchronizer", ".", "options", "if", "self", ".", "synchronizer", "else", "{", "}", "d", ".", "update", "(", "self", ".", "extra_opts", ")", "return", "d" ]
Return options from synchronizer (possibly overridden by own extra_opts).
[ "Return", "options", "from", "synchronizer", "(", "possibly", "overridden", "by", "own", "extra_opts", ")", "." ]
python
train
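A minimal sketch of the override behaviour shown in the get_options_dict row above, using plain dicts in place of the synchronizer object (the option names are made up). Note that, as written, the original calls update() on the synchronizer's own options dict rather than a copy, so the override also mutates it in place.

synchronizer_options = {"dry_run": False, "verbose": 3}   # stand-in for synchronizer.options
extra_opts = {"verbose": 5}

d = synchronizer_options          # the original starts from the synchronizer's dict...
d.update(extra_opts)              # ...so extra_opts both overrides and mutates it in place
assert d == {"dry_run": False, "verbose": 5}
assert synchronizer_options["verbose"] == 5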
pokerregion/poker
poker/commands.py
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/commands.py#L100-L119
def p5list(num):
    """List pocketfives ranked players, max 100 if no NUM, or NUM if specified."""
    from .website.pocketfives import get_ranked_players

    format_str = '{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'\
                 '{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}'

    click.echo(format_str.format(
        'Rank', 'Player name', 'Country', 'Triple', 'Monthly',
        'Biggest cash', 'PLB score', 'Biggest s', 'Average s', 'Prev'
    ))

    # just generate the appropriate number of underlines and cut them with format_str
    underlines = ['-' * 20] * 10
    click.echo(format_str.format(*underlines))

    for ind, player in enumerate(get_ranked_players()):
        click.echo(format_str.format(str(ind + 1) + '.', *player))
        if ind == num - 1:
            break
[ "def", "p5list", "(", "num", ")", ":", "from", ".", "website", ".", "pocketfives", "import", "get_ranked_players", "format_str", "=", "'{:>4.4} {!s:<15.13}{!s:<18.15}{!s:<9.6}{!s:<10.7}'", "'{!s:<14.11}{!s:<12.9}{!s:<12.9}{!s:<12.9}{!s:<4.4}'", "click", ".", "echo", "(", "format_str", ".", "format", "(", "'Rank'", ",", "'Player name'", ",", "'Country'", ",", "'Triple'", ",", "'Monthly'", ",", "'Biggest cash'", ",", "'PLB score'", ",", "'Biggest s'", ",", "'Average s'", ",", "'Prev'", ")", ")", "# just generate the appropriate number of underlines and cut them with format_str", "underlines", "=", "[", "'-'", "*", "20", "]", "*", "10", "click", ".", "echo", "(", "format_str", ".", "format", "(", "*", "underlines", ")", ")", "for", "ind", ",", "player", "in", "enumerate", "(", "get_ranked_players", "(", ")", ")", ":", "click", ".", "echo", "(", "format_str", ".", "format", "(", "str", "(", "ind", "+", "1", ")", "+", "'.'", ",", "*", "player", ")", ")", "if", "ind", "==", "num", "-", "1", ":", "break" ]
List pocketfives ranked players, max 100 if no NUM, or NUM if specified.
[ "List", "pocketfives", "ranked", "players", "max", "100", "if", "no", "NUM", "or", "NUM", "if", "specified", "." ]
python
train
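As a side note on the p5list row above: format specs such as '{!s:<15.13}' both pad each column to a fixed width and truncate long values, which is what keeps the table aligned. A small self-contained check, using only the first three column specs from the code:

fmt = '{:>4.4} {!s:<15.13}{!s:<18.15}'
header = fmt.format('Rank', 'Player name', 'Country')
row = fmt.format('1.', 'a-very-long-player-name', 'Hungary')

assert len(header) == len(row) == 4 + 1 + 15 + 18   # fixed column widths
assert 'a-very-long-p' in row                       # the .13 precision truncates the name
assert 'a-very-long-pl' not in row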
django-ses/django-ses
django_ses/utils.py
https://github.com/django-ses/django-ses/blob/2f0fd8e3fdc76d3512982c0bb8e2f6e93e09fa3c/django_ses/utils.py#L117-L136
def _get_cert_url(self):
    """
    Get the signing certificate URL.
    Only accept urls that match the domains set in the
    AWS_SNS_BOUNCE_CERT_TRUSTED_DOMAINS setting. Sub-domains
    are allowed. i.e. if amazonaws.com is in the trusted domains
    then sns.us-east-1.amazonaws.com will match.
    """
    cert_url = self._data.get('SigningCertURL')
    if cert_url:
        if cert_url.startswith('https://'):
            url_obj = urlparse(cert_url)
            for trusted_domain in settings.BOUNCE_CERT_DOMAINS:
                parts = trusted_domain.split('.')
                if url_obj.netloc.split('.')[-len(parts):] == parts:
                    return cert_url
        logger.warning(u'Untrusted certificate URL: "%s"', cert_url)
    else:
        logger.warning(u'No signing certificate URL: "%s"', cert_url)
    return None
[ "def", "_get_cert_url", "(", "self", ")", ":", "cert_url", "=", "self", ".", "_data", ".", "get", "(", "'SigningCertURL'", ")", "if", "cert_url", ":", "if", "cert_url", ".", "startswith", "(", "'https://'", ")", ":", "url_obj", "=", "urlparse", "(", "cert_url", ")", "for", "trusted_domain", "in", "settings", ".", "BOUNCE_CERT_DOMAINS", ":", "parts", "=", "trusted_domain", ".", "split", "(", "'.'", ")", "if", "url_obj", ".", "netloc", ".", "split", "(", "'.'", ")", "[", "-", "len", "(", "parts", ")", ":", "]", "==", "parts", ":", "return", "cert_url", "logger", ".", "warning", "(", "u'Untrusted certificate URL: \"%s\"'", ",", "cert_url", ")", "else", ":", "logger", ".", "warning", "(", "u'No signing certificate URL: \"%s\"'", ",", "cert_url", ")", "return", "None" ]
Get the signing certificate URL. Only accept urls that match the domains set in the AWS_SNS_BOUNCE_CERT_TRUSTED_DOMAINS setting. Sub-domains are allowed. i.e. if amazonaws.com is in the trusted domains then sns.us-east-1.amazonaws.com will match.
[ "Get", "the", "signing", "certificate", "URL", ".", "Only", "accept", "urls", "that", "match", "the", "domains", "set", "in", "the", "AWS_SNS_BOUNCE_CERT_TRUSTED_DOMAINS", "setting", ".", "Sub", "-", "domains", "are", "allowed", ".", "i", ".", "e", ".", "if", "amazonaws", ".", "com", "is", "in", "the", "trusted", "domains", "then", "sns", ".", "us", "-", "east", "-", "1", ".", "amazonaws", ".", "com", "will", "match", "." ]
python
train
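The trust check in the _get_cert_url row above is a plain domain-suffix comparison on the URL's netloc. A self-contained sketch of the same rule, using the standard-library urlparse and a made-up trusted-domain list (the helper name _is_trusted is invented here):

from urllib.parse import urlparse

trusted_domains = ['amazonaws.com', 'amazon.com']

def _is_trusted(cert_url):
    if not cert_url.startswith('https://'):
        return False
    netloc = urlparse(cert_url).netloc
    for trusted_domain in trusted_domains:
        parts = trusted_domain.split('.')
        if netloc.split('.')[-len(parts):] == parts:   # suffix match, sub-domains allowed
            return True
    return False

assert _is_trusted('https://sns.us-east-1.amazonaws.com/cert.pem')
assert not _is_trusted('https://amazonaws.com.evil.example/cert.pem')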
pylast/pylast
src/pylast/__init__.py
https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1334-L1346
def get_tags(self):
    """Returns a list of the tags set by the user to this object."""

    # Uncacheable because it can be dynamically changed by the user.
    params = self._get_params()
    doc = self._request(self.ws_prefix + ".getTags", False, params)
    tag_names = _extract_all(doc, "name")
    tags = []
    for tag in tag_names:
        tags.append(Tag(tag, self.network))

    return tags
[ "def", "get_tags", "(", "self", ")", ":", "# Uncacheable because it can be dynamically changed by the user.", "params", "=", "self", ".", "_get_params", "(", ")", "doc", "=", "self", ".", "_request", "(", "self", ".", "ws_prefix", "+", "\".getTags\"", ",", "False", ",", "params", ")", "tag_names", "=", "_extract_all", "(", "doc", ",", "\"name\"", ")", "tags", "=", "[", "]", "for", "tag", "in", "tag_names", ":", "tags", ".", "append", "(", "Tag", "(", "tag", ",", "self", ".", "network", ")", ")", "return", "tags" ]
Returns a list of the tags set by the user to this object.
[ "Returns", "a", "list", "of", "the", "tags", "set", "by", "the", "user", "to", "this", "object", "." ]
python
train
thespacedoctor/sherlock
sherlock/transient_catalogue_crossmatch.py
https://github.com/thespacedoctor/sherlock/blob/2c80fb6fa31b04e7820e6928e3d437a21e692dd3/sherlock/transient_catalogue_crossmatch.py#L88-L262
def match(self): """ *match the transients against the sherlock-catalogues according to the search algorithm and return matches alongside the predicted classification(s)* **Return:** - ``classification`` -- the crossmatch results and classifications assigned to the transients See the class docstring for usage. .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring """ self.log.debug('starting the ``match`` method') classifications = [] # COUNT NUMBER OF TRANSIENT TO CROSSMATCH numberOfTransients = len(self.transients) count = 0 # GRAB SEARCH ALGORITHM sa = self.settings["search algorithm"] # FOR EACH TRANSIENT SOURCE IN THE LIST ... allCatalogueMatches = [] catalogueMatches = [] nonSynonymTransients = self.transients[:] # SYNONYM SEARCHES # ITERATE THROUGH SEARCH ALGORITHM IN ORDER # PRESENTED IN THE SETTINGS FILE brightnessFilters = ["bright", "faint", "general"] for search_name, searchPara in sa.iteritems(): for bf in brightnessFilters: if bf not in searchPara: continue if "synonym" not in searchPara[bf] or searchPara[bf]["synonym"] == False: continue self.log.debug(""" searching: %(search_name)s""" % locals()) if "physical radius kpc" in searchPara[bf]: # THE PHYSICAL SEPARATION SEARCHES self.log.debug( 'checking physical distance crossmatches in %(search_name)s' % locals()) catalogueMatches = self.physical_separation_crossmatch_against_catalogue( objectList=self.transients, searchPara=searchPara, search_name=search_name + " physical", brightnessFilter=bf, classificationType="synonym" ) else: # THE ANGULAR SEPARATION SEARCHES self.log.debug( 'Crossmatching against %(search_name)s' % locals()) # RENAMED from searchCatalogue catalogueMatches = self.angular_crossmatch_against_catalogue( objectList=self.transients, searchPara=searchPara, search_name=search_name + " angular", brightnessFilter=bf, classificationType="synonym" ) # ADD CLASSIFICATION AND CROSSMATCHES IF FOUND if catalogueMatches: allCatalogueMatches = allCatalogueMatches + catalogueMatches synonymIDs = [] synonymIDs[:] = [xm["transient_object_id"] for xm in allCatalogueMatches] nonSynonymTransients = [] nonSynonymTransients[:] = [ t for t in self.transients if t["id"] not in synonymIDs] # ASSOCIATION SEARCHES # ITERATE THROUGH SEARCH ALGORITHM IN ORDER # PRESENTED IN THE SETTINGS FILE if len(nonSynonymTransients) > 0: for search_name, searchPara in sa.iteritems(): self.log.debug(""" searching: %(search_name)s""" % locals()) for bf in brightnessFilters: if bf not in searchPara: continue if "association" not in searchPara[bf] or searchPara[bf]["association"] == False: continue if "physical radius kpc" in searchPara[bf]: # THE PHYSICAL SEPARATION SEARCHES self.log.debug( 'checking physical distance crossmatches in %(search_name)s' % locals()) catalogueMatches = self.physical_separation_crossmatch_against_catalogue( objectList=nonSynonymTransients, searchPara=searchPara, search_name=search_name + " physical", brightnessFilter=bf, classificationType="association" ) else: # THE ANGULAR SEPARATION SEARCHES self.log.debug( 'Crossmatching against %(search_name)s' % locals()) # RENAMED from searchCatalogue catalogueMatches = self.angular_crossmatch_against_catalogue( objectList=nonSynonymTransients, searchPara=searchPara, search_name=search_name + " angular", brightnessFilter=bf, 
classificationType="association" ) # ADD CLASSIFICATION AND CROSSMATCHES IF FOUND if catalogueMatches: allCatalogueMatches = allCatalogueMatches + catalogueMatches catalogueMatches = [] associationIDs = [] associationIDs[:] = [xm["transient_object_id"] for xm in allCatalogueMatches] nonAssociationTransients = [] nonAssociationTransients[:] = [ t for t in self.transients if t["id"] not in associationIDs] # ANNOTATION SEARCHES # ITERATE THROUGH SEARCH ALGORITHM IN ORDER # PRESENTED IN THE SETTINGS FILE brightnessFilters = ["bright", "faint", "general"] for search_name, searchPara in sa.iteritems(): for bf in brightnessFilters: if bf not in searchPara: continue if "annotation" not in searchPara[bf] or searchPara[bf]["annotation"] == False: continue self.log.debug(""" searching: %(search_name)s""" % locals()) if "physical radius kpc" in searchPara[bf]: # THE PHYSICAL SEPARATION SEARCHES self.log.debug( 'checking physical distance crossmatches in %(search_name)s' % locals()) if bf in searchPara: catalogueMatches = self.physical_separation_crossmatch_against_catalogue( objectList=nonAssociationTransients, searchPara=searchPara, search_name=search_name + " physical", brightnessFilter=bf, classificationType="annotation" ) else: # THE ANGULAR SEPARATION SEARCHES self.log.debug( 'Crossmatching against %(search_name)s' % locals()) # RENAMED from searchCatalogue if bf in searchPara: catalogueMatches = self.angular_crossmatch_against_catalogue( objectList=nonAssociationTransients, searchPara=searchPara, search_name=search_name + " angular", brightnessFilter=bf, classificationType="annotation" ) # ADD CLASSIFICATION AND CROSSMATCHES IF FOUND if catalogueMatches: allCatalogueMatches = allCatalogueMatches + catalogueMatches self.log.debug('completed the ``match`` method') return allCatalogueMatches
[ "def", "match", "(", "self", ")", ":", "self", ".", "log", ".", "debug", "(", "'starting the ``match`` method'", ")", "classifications", "=", "[", "]", "# COUNT NUMBER OF TRANSIENT TO CROSSMATCH", "numberOfTransients", "=", "len", "(", "self", ".", "transients", ")", "count", "=", "0", "# GRAB SEARCH ALGORITHM", "sa", "=", "self", ".", "settings", "[", "\"search algorithm\"", "]", "# FOR EACH TRANSIENT SOURCE IN THE LIST ...", "allCatalogueMatches", "=", "[", "]", "catalogueMatches", "=", "[", "]", "nonSynonymTransients", "=", "self", ".", "transients", "[", ":", "]", "# SYNONYM SEARCHES", "# ITERATE THROUGH SEARCH ALGORITHM IN ORDER", "# PRESENTED IN THE SETTINGS FILE", "brightnessFilters", "=", "[", "\"bright\"", ",", "\"faint\"", ",", "\"general\"", "]", "for", "search_name", ",", "searchPara", "in", "sa", ".", "iteritems", "(", ")", ":", "for", "bf", "in", "brightnessFilters", ":", "if", "bf", "not", "in", "searchPara", ":", "continue", "if", "\"synonym\"", "not", "in", "searchPara", "[", "bf", "]", "or", "searchPara", "[", "bf", "]", "[", "\"synonym\"", "]", "==", "False", ":", "continue", "self", ".", "log", ".", "debug", "(", "\"\"\" searching: %(search_name)s\"\"\"", "%", "locals", "(", ")", ")", "if", "\"physical radius kpc\"", "in", "searchPara", "[", "bf", "]", ":", "# THE PHYSICAL SEPARATION SEARCHES", "self", ".", "log", ".", "debug", "(", "'checking physical distance crossmatches in %(search_name)s'", "%", "locals", "(", ")", ")", "catalogueMatches", "=", "self", ".", "physical_separation_crossmatch_against_catalogue", "(", "objectList", "=", "self", ".", "transients", ",", "searchPara", "=", "searchPara", ",", "search_name", "=", "search_name", "+", "\" physical\"", ",", "brightnessFilter", "=", "bf", ",", "classificationType", "=", "\"synonym\"", ")", "else", ":", "# THE ANGULAR SEPARATION SEARCHES", "self", ".", "log", ".", "debug", "(", "'Crossmatching against %(search_name)s'", "%", "locals", "(", ")", ")", "# RENAMED from searchCatalogue", "catalogueMatches", "=", "self", ".", "angular_crossmatch_against_catalogue", "(", "objectList", "=", "self", ".", "transients", ",", "searchPara", "=", "searchPara", ",", "search_name", "=", "search_name", "+", "\" angular\"", ",", "brightnessFilter", "=", "bf", ",", "classificationType", "=", "\"synonym\"", ")", "# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND", "if", "catalogueMatches", ":", "allCatalogueMatches", "=", "allCatalogueMatches", "+", "catalogueMatches", "synonymIDs", "=", "[", "]", "synonymIDs", "[", ":", "]", "=", "[", "xm", "[", "\"transient_object_id\"", "]", "for", "xm", "in", "allCatalogueMatches", "]", "nonSynonymTransients", "=", "[", "]", "nonSynonymTransients", "[", ":", "]", "=", "[", "t", "for", "t", "in", "self", ".", "transients", "if", "t", "[", "\"id\"", "]", "not", "in", "synonymIDs", "]", "# ASSOCIATION SEARCHES", "# ITERATE THROUGH SEARCH ALGORITHM IN ORDER", "# PRESENTED IN THE SETTINGS FILE", "if", "len", "(", "nonSynonymTransients", ")", ">", "0", ":", "for", "search_name", ",", "searchPara", "in", "sa", ".", "iteritems", "(", ")", ":", "self", ".", "log", ".", "debug", "(", "\"\"\" searching: %(search_name)s\"\"\"", "%", "locals", "(", ")", ")", "for", "bf", "in", "brightnessFilters", ":", "if", "bf", "not", "in", "searchPara", ":", "continue", "if", "\"association\"", "not", "in", "searchPara", "[", "bf", "]", "or", "searchPara", "[", "bf", "]", "[", "\"association\"", "]", "==", "False", ":", "continue", "if", "\"physical radius kpc\"", "in", "searchPara", "[", "bf", "]", ":", "# THE PHYSICAL 
SEPARATION SEARCHES", "self", ".", "log", ".", "debug", "(", "'checking physical distance crossmatches in %(search_name)s'", "%", "locals", "(", ")", ")", "catalogueMatches", "=", "self", ".", "physical_separation_crossmatch_against_catalogue", "(", "objectList", "=", "nonSynonymTransients", ",", "searchPara", "=", "searchPara", ",", "search_name", "=", "search_name", "+", "\" physical\"", ",", "brightnessFilter", "=", "bf", ",", "classificationType", "=", "\"association\"", ")", "else", ":", "# THE ANGULAR SEPARATION SEARCHES", "self", ".", "log", ".", "debug", "(", "'Crossmatching against %(search_name)s'", "%", "locals", "(", ")", ")", "# RENAMED from searchCatalogue", "catalogueMatches", "=", "self", ".", "angular_crossmatch_against_catalogue", "(", "objectList", "=", "nonSynonymTransients", ",", "searchPara", "=", "searchPara", ",", "search_name", "=", "search_name", "+", "\" angular\"", ",", "brightnessFilter", "=", "bf", ",", "classificationType", "=", "\"association\"", ")", "# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND", "if", "catalogueMatches", ":", "allCatalogueMatches", "=", "allCatalogueMatches", "+", "catalogueMatches", "catalogueMatches", "=", "[", "]", "associationIDs", "=", "[", "]", "associationIDs", "[", ":", "]", "=", "[", "xm", "[", "\"transient_object_id\"", "]", "for", "xm", "in", "allCatalogueMatches", "]", "nonAssociationTransients", "=", "[", "]", "nonAssociationTransients", "[", ":", "]", "=", "[", "t", "for", "t", "in", "self", ".", "transients", "if", "t", "[", "\"id\"", "]", "not", "in", "associationIDs", "]", "# ANNOTATION SEARCHES", "# ITERATE THROUGH SEARCH ALGORITHM IN ORDER", "# PRESENTED IN THE SETTINGS FILE", "brightnessFilters", "=", "[", "\"bright\"", ",", "\"faint\"", ",", "\"general\"", "]", "for", "search_name", ",", "searchPara", "in", "sa", ".", "iteritems", "(", ")", ":", "for", "bf", "in", "brightnessFilters", ":", "if", "bf", "not", "in", "searchPara", ":", "continue", "if", "\"annotation\"", "not", "in", "searchPara", "[", "bf", "]", "or", "searchPara", "[", "bf", "]", "[", "\"annotation\"", "]", "==", "False", ":", "continue", "self", ".", "log", ".", "debug", "(", "\"\"\" searching: %(search_name)s\"\"\"", "%", "locals", "(", ")", ")", "if", "\"physical radius kpc\"", "in", "searchPara", "[", "bf", "]", ":", "# THE PHYSICAL SEPARATION SEARCHES", "self", ".", "log", ".", "debug", "(", "'checking physical distance crossmatches in %(search_name)s'", "%", "locals", "(", ")", ")", "if", "bf", "in", "searchPara", ":", "catalogueMatches", "=", "self", ".", "physical_separation_crossmatch_against_catalogue", "(", "objectList", "=", "nonAssociationTransients", ",", "searchPara", "=", "searchPara", ",", "search_name", "=", "search_name", "+", "\" physical\"", ",", "brightnessFilter", "=", "bf", ",", "classificationType", "=", "\"annotation\"", ")", "else", ":", "# THE ANGULAR SEPARATION SEARCHES", "self", ".", "log", ".", "debug", "(", "'Crossmatching against %(search_name)s'", "%", "locals", "(", ")", ")", "# RENAMED from searchCatalogue", "if", "bf", "in", "searchPara", ":", "catalogueMatches", "=", "self", ".", "angular_crossmatch_against_catalogue", "(", "objectList", "=", "nonAssociationTransients", ",", "searchPara", "=", "searchPara", ",", "search_name", "=", "search_name", "+", "\" angular\"", ",", "brightnessFilter", "=", "bf", ",", "classificationType", "=", "\"annotation\"", ")", "# ADD CLASSIFICATION AND CROSSMATCHES IF FOUND", "if", "catalogueMatches", ":", "allCatalogueMatches", "=", "allCatalogueMatches", "+", "catalogueMatches", 
"self", ".", "log", ".", "debug", "(", "'completed the ``match`` method'", ")", "return", "allCatalogueMatches" ]
*match the transients against the sherlock-catalogues according to the search algorithm and return matches alongside the predicted classification(s)* **Return:** - ``classification`` -- the crossmatch results and classifications assigned to the transients See the class docstring for usage. .. todo :: - update key arguments values and definitions with defaults - update return values and definitions - update usage examples and text - update docstring text - check sublime snippet exists - clip any useful text to docs mindmap - regenerate the docs and check redendering of this docstring
[ "*", "match", "the", "transients", "against", "the", "sherlock", "-", "catalogues", "according", "to", "the", "search", "algorithm", "and", "return", "matches", "alongside", "the", "predicted", "classification", "(", "s", ")", "*" ]
python
train
mixer/beam-interactive-python
beam_interactive/proto/rw.py
https://github.com/mixer/beam-interactive-python/blob/e035bc45515dea9315b77648a24b5ae8685aa5cf/beam_interactive/proto/rw.py#L35-L51
def decode(self, bytes):
    """ Decodes the packet off the byte string. """
    self.buffer = bytes
    self._pos = 0

    Packet = identifier.get_packet_from_id(self._read_variunt())

    # unknown packets will be None from the identifier
    if Packet is None:
        return None

    packet = Packet()
    packet.ParseFromString(self.remaining_bytes())

    return packet
[ "def", "decode", "(", "self", ",", "bytes", ")", ":", "self", ".", "buffer", "=", "bytes", "self", ".", "_pos", "=", "0", "Packet", "=", "identifier", ".", "get_packet_from_id", "(", "self", ".", "_read_variunt", "(", ")", ")", "# unknown packets will be None from the identifier", "if", "Packet", "is", "None", ":", "return", "None", "packet", "=", "Packet", "(", ")", "packet", ".", "ParseFromString", "(", "self", ".", "remaining_bytes", "(", ")", ")", "return", "packet" ]
Decodes the packet off the byte string.
[ "Decodes", "the", "packet", "off", "the", "byte", "string", "." ]
python
train
materialsproject/pymatgen
pymatgen/analysis/elasticity/elastic.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/elasticity/elastic.py#L640-L662
def get_heat_capacity(self, temperature, structure, n, u, cutoff=1e2):
    """
    Gets the directional heat capacity for a higher order tensor
    expansion as a function of direction and polarization.

    Args:
        temperature (float): Temperature in kelvin
        structure (float): Structure to be used in directional heat
            capacity determination
        n (3x1 array-like): direction for Cv determination
        u (3x1 array-like): polarization direction, note that
            no attempt for verification of eigenvectors is made
        cutoff (float): cutoff for scale of kt / (hbar * omega)
            if lower than this value, returns 0
    """
    k = 1.38065e-23
    kt = k*temperature
    hbar_w = 1.05457e-34*self.omega(structure, n, u)
    if hbar_w > kt * cutoff:
        return 0.0
    c = k * (hbar_w / kt) ** 2
    c *= np.exp(hbar_w / kt) / (np.exp(hbar_w / kt) - 1)**2
    return c * 6.022e23
[ "def", "get_heat_capacity", "(", "self", ",", "temperature", ",", "structure", ",", "n", ",", "u", ",", "cutoff", "=", "1e2", ")", ":", "k", "=", "1.38065e-23", "kt", "=", "k", "*", "temperature", "hbar_w", "=", "1.05457e-34", "*", "self", ".", "omega", "(", "structure", ",", "n", ",", "u", ")", "if", "hbar_w", ">", "kt", "*", "cutoff", ":", "return", "0.0", "c", "=", "k", "*", "(", "hbar_w", "/", "kt", ")", "**", "2", "c", "*=", "np", ".", "exp", "(", "hbar_w", "/", "kt", ")", "/", "(", "np", ".", "exp", "(", "hbar_w", "/", "kt", ")", "-", "1", ")", "**", "2", "return", "c", "*", "6.022e23" ]
Gets the directional heat capacity for a higher order tensor expansion as a function of direction and polarization. Args: temperature (float): Temperature in kelvin structure (float): Structure to be used in directional heat capacity determination n (3x1 array-like): direction for Cv determination u (3x1 array-like): polarization direction, note that no attempt for verification of eigenvectors is made cutoff (float): cutoff for scale of kt / (hbar * omega) if lower than this value, returns 0
[ "Gets", "the", "directional", "heat", "capacity", "for", "a", "higher", "order", "tensor", "expansion", "as", "a", "function", "of", "direction", "and", "polarization", "." ]
python
train
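The body of get_heat_capacity above is the standard single-oscillator (Einstein) heat capacity, scaled by Avogadro's number. A standalone sketch with a made-up angular frequency, as a sanity check that it approaches the classical limit k·N_A ≈ 8.314 J/(mol·K) at high temperature:

import numpy as np

def mode_heat_capacity(temperature, omega, cutoff=1e2):
    k = 1.38065e-23                      # Boltzmann constant, J/K
    kt = k * temperature
    hbar_w = 1.05457e-34 * omega         # hbar * omega, J
    if hbar_w > kt * cutoff:
        return 0.0
    x = hbar_w / kt
    c = k * x ** 2 * np.exp(x) / (np.exp(x) - 1) ** 2
    return c * 6.022e23                  # per mole of oscillators

# omega = 1e12 rad/s at 1000 K is deep in the classical regime, so the result is ~R.
assert abs(mode_heat_capacity(1000.0, 1e12) - 8.314) < 0.1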
phaethon/kamene
kamene/crypto/cert.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/crypto/cert.py#L1957-L1980
def is_revoked(self, crl_list):
    """
    Given a list of trusted CRL (their signature has already been
    verified with trusted anchors), this function returns True if
    the certificate is marked as revoked by one of those CRL.

    Note that if the Certificate was on hold in a previous CRL and
    is now valid again in a new CRL and bot are in the list, it
    will be considered revoked: this is because _all_ CRLs are
    checked (not only the freshest) and revocation status is not
    handled.

    Also note that the check on the issuer is performed on the
    Authority Key Identifier if available in _both_ the CRL and
    the Cert. Otherwise, the issuers are simply compared.
    """
    for c in crl_list:
        if (self.authorityKeyID is not None and
                c.authorityKeyID is not None and
                self.authorityKeyID == c.authorityKeyID):
            return self.serial in map(lambda x: x[0], c.revoked_cert_serials)
        elif (self.issuer == c.issuer):
            return self.serial in map(lambda x: x[0], c.revoked_cert_serials)
    return False
[ "def", "is_revoked", "(", "self", ",", "crl_list", ")", ":", "for", "c", "in", "crl_list", ":", "if", "(", "self", ".", "authorityKeyID", "is", "not", "None", "and", "c", ".", "authorityKeyID", "is", "not", "None", "and", "self", ".", "authorityKeyID", "==", "c", ".", "authorityKeyID", ")", ":", "return", "self", ".", "serial", "in", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "c", ".", "revoked_cert_serials", ")", "elif", "(", "self", ".", "issuer", "==", "c", ".", "issuer", ")", ":", "return", "self", ".", "serial", "in", "map", "(", "lambda", "x", ":", "x", "[", "0", "]", ",", "c", ".", "revoked_cert_serials", ")", "return", "False" ]
Given a list of trusted CRL (their signature has already been verified with trusted anchors), this function returns True if the certificate is marked as revoked by one of those CRL. Note that if the Certificate was on hold in a previous CRL and is now valid again in a new CRL and bot are in the list, it will be considered revoked: this is because _all_ CRLs are checked (not only the freshest) and revocation status is not handled. Also note that the check on the issuer is performed on the Authority Key Identifier if available in _both_ the CRL and the Cert. Otherwise, the issuers are simply compared.
[ "Given", "a", "list", "of", "trusted", "CRL", "(", "their", "signature", "has", "already", "been", "verified", "with", "trusted", "anchors", ")", "this", "function", "returns", "True", "if", "the", "certificate", "is", "marked", "as", "revoked", "by", "one", "of", "those", "CRL", "." ]
python
train
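A minimal sketch of the matching rule used by is_revoked above, with stand-in objects instead of real certificates and CRLs (all attribute values here are made up):

class _Obj:
    def __init__(self, **kw):
        self.__dict__.update(kw)

cert = _Obj(authorityKeyID=b'\x01', issuer='CN=Test CA', serial=1234)
crl = _Obj(authorityKeyID=b'\x01', issuer='CN=Test CA',
           revoked_cert_serials=[(1234, 'keyCompromise', '2019-01-01')])

# Same rule as is_revoked(): prefer the Authority Key Identifier, fall back to
# comparing issuers, then look the serial up in the CRL's revoked list.
same_issuer = (cert.authorityKeyID == crl.authorityKeyID) or (cert.issuer == crl.issuer)
revoked = same_issuer and cert.serial in map(lambda x: x[0], crl.revoked_cert_serials)
assert revoked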
zsimic/runez
src/runez/system.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/system.py#L21-L66
def abort(*args, **kwargs):
    """
    Usage:
        return abort("...")                       => will sys.exit() by default
        return abort("...", fatal=True)           => Will sys.exit()

        # Not fatal, but will log/print message:
        return abort("...", fatal=False)          => Will return False
        return abort("...", fatal=(False, None))  => Will return None
        return abort("...", fatal=(False, -1))    => Will return -1

        # Not fatal, will not log/print any message:
        return abort("...", fatal=None)           => Will return None
        return abort("...", fatal=(None, None))   => Will return None
        return abort("...", fatal=(None, -1))     => Will return -1

    :param args: Args passed through for error reporting
    :param kwargs: Args passed through for error reporting
    :return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
    """
    code = kwargs.pop("code", 1)
    logger = kwargs.pop("logger", LOG.error if code else LOG.info)
    fatal = kwargs.pop("fatal", True)
    return_value = fatal

    if isinstance(fatal, tuple) and len(fatal) == 2:
        fatal, return_value = fatal

    if logger and fatal is not None and args:
        if logging.root.handlers:
            logger(*args, **kwargs)
        else:
            sys.stderr.write("%s\n" % formatted_string(*args))

    if fatal:
        if isinstance(fatal, type) and issubclass(fatal, BaseException):
            raise fatal(code)

        if AbortException is not None:
            if isinstance(AbortException, type) and issubclass(AbortException, BaseException):
                raise AbortException(code)
            return AbortException(code)

    return return_value
[ "def", "abort", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "code", "=", "kwargs", ".", "pop", "(", "\"code\"", ",", "1", ")", "logger", "=", "kwargs", ".", "pop", "(", "\"logger\"", ",", "LOG", ".", "error", "if", "code", "else", "LOG", ".", "info", ")", "fatal", "=", "kwargs", ".", "pop", "(", "\"fatal\"", ",", "True", ")", "return_value", "=", "fatal", "if", "isinstance", "(", "fatal", ",", "tuple", ")", "and", "len", "(", "fatal", ")", "==", "2", ":", "fatal", ",", "return_value", "=", "fatal", "if", "logger", "and", "fatal", "is", "not", "None", "and", "args", ":", "if", "logging", ".", "root", ".", "handlers", ":", "logger", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "sys", ".", "stderr", ".", "write", "(", "\"%s\\n\"", "%", "formatted_string", "(", "*", "args", ")", ")", "if", "fatal", ":", "if", "isinstance", "(", "fatal", ",", "type", ")", "and", "issubclass", "(", "fatal", ",", "BaseException", ")", ":", "raise", "fatal", "(", "code", ")", "if", "AbortException", "is", "not", "None", ":", "if", "isinstance", "(", "AbortException", ",", "type", ")", "and", "issubclass", "(", "AbortException", ",", "BaseException", ")", ":", "raise", "AbortException", "(", "code", ")", "return", "AbortException", "(", "code", ")", "return", "return_value" ]
Usage: return abort("...") => will sys.exit() by default return abort("...", fatal=True) => Will sys.exit() # Not fatal, but will log/print message: return abort("...", fatal=False) => Will return False return abort("...", fatal=(False, None)) => Will return None return abort("...", fatal=(False, -1)) => Will return -1 # Not fatal, will not log/print any message: return abort("...", fatal=None) => Will return None return abort("...", fatal=(None, None)) => Will return None return abort("...", fatal=(None, -1)) => Will return -1 :param args: Args passed through for error reporting :param kwargs: Args passed through for error reporting :return: kwargs["return_value"] (default: -1) to signify failure to non-fatal callers
[ "Usage", ":", "return", "abort", "(", "...", ")", "=", ">", "will", "sys", ".", "exit", "()", "by", "default", "return", "abort", "(", "...", "fatal", "=", "True", ")", "=", ">", "Will", "sys", ".", "exit", "()" ]
python
train
RiotGames/cloud-inquisitor
backend/cloud_inquisitor/config.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/config.py#L121-L186
def set(self, namespace, key, value, description=None): """Set (create/update) a configuration item Args: namespace (`str`): Namespace for the item key (`str`): Key of the item value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or `bool` description (`str`): Description of the configuration item Returns: `None` """ if isinstance(value, DBCChoice): vtype = 'choice' elif isinstance(value, DBCString): vtype = 'string' elif isinstance(value, DBCFloat): vtype = 'float' elif isinstance(value, DBCInt): vtype = 'int' elif isinstance(value, DBCArray): vtype = 'array' elif isinstance(value, DBCJSON): vtype = 'json' elif isinstance(value, bool): vtype = 'bool' else: raise ValueError('Invalid config item type: {}'.format(type(value))) if namespace in self.__data and key in self.__data[namespace]: itm = db.ConfigItem.find_one( ConfigItem.namespace_prefix == namespace, ConfigItem.key == key ) if not itm: raise KeyError(key) itm.value = value itm.type = vtype if description: itm.description = description else: itm = ConfigItem() itm.key = key itm.value = value itm.type = vtype itm.description = description itm.namespace_prefix = namespace db.session.add(itm) db.session.commit() if namespace in self.__data: self.__data[namespace][key] = value else: self.__data[namespace] = {key: value}
[ "def", "set", "(", "self", ",", "namespace", ",", "key", ",", "value", ",", "description", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "DBCChoice", ")", ":", "vtype", "=", "'choice'", "elif", "isinstance", "(", "value", ",", "DBCString", ")", ":", "vtype", "=", "'string'", "elif", "isinstance", "(", "value", ",", "DBCFloat", ")", ":", "vtype", "=", "'float'", "elif", "isinstance", "(", "value", ",", "DBCInt", ")", ":", "vtype", "=", "'int'", "elif", "isinstance", "(", "value", ",", "DBCArray", ")", ":", "vtype", "=", "'array'", "elif", "isinstance", "(", "value", ",", "DBCJSON", ")", ":", "vtype", "=", "'json'", "elif", "isinstance", "(", "value", ",", "bool", ")", ":", "vtype", "=", "'bool'", "else", ":", "raise", "ValueError", "(", "'Invalid config item type: {}'", ".", "format", "(", "type", "(", "value", ")", ")", ")", "if", "namespace", "in", "self", ".", "__data", "and", "key", "in", "self", ".", "__data", "[", "namespace", "]", ":", "itm", "=", "db", ".", "ConfigItem", ".", "find_one", "(", "ConfigItem", ".", "namespace_prefix", "==", "namespace", ",", "ConfigItem", ".", "key", "==", "key", ")", "if", "not", "itm", ":", "raise", "KeyError", "(", "key", ")", "itm", ".", "value", "=", "value", "itm", ".", "type", "=", "vtype", "if", "description", ":", "itm", ".", "description", "=", "description", "else", ":", "itm", "=", "ConfigItem", "(", ")", "itm", ".", "key", "=", "key", "itm", ".", "value", "=", "value", "itm", ".", "type", "=", "vtype", "itm", ".", "description", "=", "description", "itm", ".", "namespace_prefix", "=", "namespace", "db", ".", "session", ".", "add", "(", "itm", ")", "db", ".", "session", ".", "commit", "(", ")", "if", "namespace", "in", "self", ".", "__data", ":", "self", ".", "__data", "[", "namespace", "]", "[", "key", "]", "=", "value", "else", ":", "self", ".", "__data", "[", "namespace", "]", "=", "{", "key", ":", "value", "}" ]
Set (create/update) a configuration item Args: namespace (`str`): Namespace for the item key (`str`): Key of the item value (`Any`): Value of the type, must by one of `DBCString`, `DBCFloat`, `DBCInt`, `DBCArray`, `DBCJSON` or `bool` description (`str`): Description of the configuration item Returns: `None`
[ "Set", "(", "create", "/", "update", ")", "a", "configuration", "item" ]
python
train
tech-pi/doufo
src/python/doufo/convert.py
https://github.com/tech-pi/doufo/blob/3d375fef30670597768a6eef809b75b4b1b5a3fd/src/python/doufo/convert.py#L96-L116
def tuple_type_compare(types0, types1):
    """doufo.tuple_type_compare: compare two types
    if `types0` is 'bigger' than `types1`, return negative (<0);
    otherwise, return positive (>0). Here 'bigger' is defined by whether
    they are 'parent and child', or ituitively bigger

    Args:
        types0 (`type`): types0
        types1 (`type`): types1

    Returns:
        return (`int`): comparison results

    Raises:
    """
    compares = [single_type_compare(types0[0], types1[0]),
                single_type_compare(types0[1], types1[1])]
    if compares[0] != 0:
        return compares[0]
    if compares[1] != 0:
        return compares[1]
    if types0[0] is types1[0] and types0[1] is types1[1]:
        return 0
    return hash(types1) - hash(types0)
[ "def", "tuple_type_compare", "(", "types0", ",", "types1", ")", ":", "compares", "=", "[", "single_type_compare", "(", "types0", "[", "0", "]", ",", "types1", "[", "0", "]", ")", ",", "single_type_compare", "(", "types0", "[", "1", "]", ",", "types1", "[", "1", "]", ")", "]", "if", "compares", "[", "0", "]", "!=", "0", ":", "return", "compares", "[", "0", "]", "if", "compares", "[", "1", "]", "!=", "0", ":", "return", "compares", "[", "1", "]", "if", "types0", "[", "0", "]", "is", "types1", "[", "0", "]", "and", "types0", "[", "1", "]", "is", "types1", "[", "1", "]", ":", "return", "0", "return", "hash", "(", "types1", ")", "-", "hash", "(", "types0", ")" ]
doufo.tuple_type_compare: compare two types if `types0` is 'bigger' than `types1`, return negative (<0); otherwise, return positive (>0). Here 'bigger' is defined by whether they are 'parent and child', or ituitively bigger Args: types0 (`type`): types0 types1 (`type`): types1 Returns: return (`int`): comparison results Raises:
[ "doufo", ".", "tuple_type_compare", ":", "compare", "two", "types", "if", "types0", "is", "bigger", "than", "types1", "return", "negative", "(", "<0", ")", ";", "otherwise", "return", "positive", "(", ">", "0", ")", ".", "Here", "bigger", "is", "defined", "by", "whether", "they", "are", "parent", "and", "child", "or", "ituitively", "bigger", "Args", ":", "types0", "(", "type", ")", ":", "types0", "types1", "(", "type", ")", ":", "types1", "Returns", ":", "return", "(", "int", ")", ":", "comparison", "results", "Raises", ":" ]
python
train
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L9309-L9471
def tz_localize(self, tz, axis=0, level=None, copy=True, ambiguous='raise', nonexistent='raise'): """ Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 """ nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and not isinstance( nonexistent, timedelta): raise ValueError("The nonexistent argument must be one of 'raise'," " 'NaT', 'shift_forward', 'shift_backward' or" " a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, 'tz_localize'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError('%s is not a valid DatetimeIndex or ' 'PeriodIndex' % ax_name) else: ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize( tz, ambiguous=ambiguous, nonexistent=nonexistent ) return ax # if a level is given it must be a MultiIndex level or # equivalent to the axis name if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize( ax.levels[level], tz, ambiguous, nonexistent ) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError("The level {0} is not valid".format(level)) ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self._constructor(self._data, copy=copy) result = result.set_axis(ax, axis=axis, inplace=False) return result.__finalize__(self)
[ "def", "tz_localize", "(", "self", ",", "tz", ",", "axis", "=", "0", ",", "level", "=", "None", ",", "copy", "=", "True", ",", "ambiguous", "=", "'raise'", ",", "nonexistent", "=", "'raise'", ")", ":", "nonexistent_options", "=", "(", "'raise'", ",", "'NaT'", ",", "'shift_forward'", ",", "'shift_backward'", ")", "if", "nonexistent", "not", "in", "nonexistent_options", "and", "not", "isinstance", "(", "nonexistent", ",", "timedelta", ")", ":", "raise", "ValueError", "(", "\"The nonexistent argument must be one of 'raise',\"", "\" 'NaT', 'shift_forward', 'shift_backward' or\"", "\" a timedelta object\"", ")", "axis", "=", "self", ".", "_get_axis_number", "(", "axis", ")", "ax", "=", "self", ".", "_get_axis", "(", "axis", ")", "def", "_tz_localize", "(", "ax", ",", "tz", ",", "ambiguous", ",", "nonexistent", ")", ":", "if", "not", "hasattr", "(", "ax", ",", "'tz_localize'", ")", ":", "if", "len", "(", "ax", ")", ">", "0", ":", "ax_name", "=", "self", ".", "_get_axis_name", "(", "axis", ")", "raise", "TypeError", "(", "'%s is not a valid DatetimeIndex or '", "'PeriodIndex'", "%", "ax_name", ")", "else", ":", "ax", "=", "DatetimeIndex", "(", "[", "]", ",", "tz", "=", "tz", ")", "else", ":", "ax", "=", "ax", ".", "tz_localize", "(", "tz", ",", "ambiguous", "=", "ambiguous", ",", "nonexistent", "=", "nonexistent", ")", "return", "ax", "# if a level is given it must be a MultiIndex level or", "# equivalent to the axis name", "if", "isinstance", "(", "ax", ",", "MultiIndex", ")", ":", "level", "=", "ax", ".", "_get_level_number", "(", "level", ")", "new_level", "=", "_tz_localize", "(", "ax", ".", "levels", "[", "level", "]", ",", "tz", ",", "ambiguous", ",", "nonexistent", ")", "ax", "=", "ax", ".", "set_levels", "(", "new_level", ",", "level", "=", "level", ")", "else", ":", "if", "level", "not", "in", "(", "None", ",", "0", ",", "ax", ".", "name", ")", ":", "raise", "ValueError", "(", "\"The level {0} is not valid\"", ".", "format", "(", "level", ")", ")", "ax", "=", "_tz_localize", "(", "ax", ",", "tz", ",", "ambiguous", ",", "nonexistent", ")", "result", "=", "self", ".", "_constructor", "(", "self", ".", "_data", ",", "copy", "=", "copy", ")", "result", "=", "result", ".", "set_axis", "(", "ax", ",", "axis", "=", "axis", ",", "inplace", "=", "False", ")", "return", "result", ".", "__finalize__", "(", "self", ")" ]
Localize tz-naive index of a Series or DataFrame to target time zone. This operation localizes the Index. To localize the values in a timezone-naive Series, use :meth:`Series.dt.tz_localize`. Parameters ---------- tz : string or pytz.timezone object axis : the axis to localize level : int, str, default None If axis ia a MultiIndex, localize a specific level. Otherwise must be None copy : boolean, default True Also make a copy of the underlying data ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' When clocks moved backward due to DST, ambiguous times may arise. For example in Central European Time (UTC+01), when going from 03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at 00:30:00 UTC and at 01:30:00 UTC. In such a situation, the `ambiguous` parameter dictates how ambiguous times should be handled. - 'infer' will attempt to infer fall dst-transition hours based on order - bool-ndarray where True signifies a DST time, False designates a non-DST time (note that this flag is only applicable for ambiguous times) - 'NaT' will return NaT where there are ambiguous times - 'raise' will raise an AmbiguousTimeError if there are ambiguous times nonexistent : str, default 'raise' A nonexistent time does not exist in a particular timezone where clocks moved forward due to DST. Valid values are: - 'shift_forward' will shift the nonexistent time forward to the closest existing time - 'shift_backward' will shift the nonexistent time backward to the closest existing time - 'NaT' will return NaT where there are nonexistent times - timedelta objects will shift nonexistent times by the timedelta - 'raise' will raise an NonExistentTimeError if there are nonexistent times .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Same type as the input. Raises ------ TypeError If the TimeSeries is tz-aware and tz is not None. Examples -------- Localize local times: >>> s = pd.Series([1], ... index=pd.DatetimeIndex(['2018-09-15 01:30:00'])) >>> s.tz_localize('CET') 2018-09-15 01:30:00+02:00 1 dtype: int64 Be careful with DST changes. When there is sequential data, pandas can infer the DST time: >>> s = pd.Series(range(7), index=pd.DatetimeIndex([ ... '2018-10-28 01:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 02:00:00', ... '2018-10-28 02:30:00', ... '2018-10-28 03:00:00', ... '2018-10-28 03:30:00'])) >>> s.tz_localize('CET', ambiguous='infer') 2018-10-28 01:30:00+02:00 0 2018-10-28 02:00:00+02:00 1 2018-10-28 02:30:00+02:00 2 2018-10-28 02:00:00+01:00 3 2018-10-28 02:30:00+01:00 4 2018-10-28 03:00:00+01:00 5 2018-10-28 03:30:00+01:00 6 dtype: int64 In some cases, inferring the DST is impossible. In such cases, you can pass an ndarray to the ambiguous parameter to set the DST explicitly >>> s = pd.Series(range(3), index=pd.DatetimeIndex([ ... '2018-10-28 01:20:00', ... '2018-10-28 02:36:00', ... '2018-10-28 03:46:00'])) >>> s.tz_localize('CET', ambiguous=np.array([True, True, False])) 2018-10-28 01:20:00+02:00 0 2018-10-28 02:36:00+02:00 1 2018-10-28 03:46:00+01:00 2 dtype: int64 If the DST transition causes nonexistent times, you can shift these dates forward or backwards with a timedelta object or `'shift_forward'` or `'shift_backwards'`. >>> s = pd.Series(range(2), index=pd.DatetimeIndex([ ... '2015-03-29 02:30:00', ... 
'2015-03-29 03:30:00'])) >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward') 2015-03-29 03:00:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward') 2015-03-29 01:59:59.999999999+01:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64 >>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H')) 2015-03-29 03:30:00+02:00 0 2015-03-29 03:30:00+02:00 1 dtype: int64
[ "Localize", "tz", "-", "naive", "index", "of", "a", "Series", "or", "DataFrame", "to", "target", "time", "zone", "." ]
python
train
openstack/proliantutils
proliantutils/hpssa/objects.py
https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/hpssa/objects.py#L35-L62
def _get_key_value(string):
    """Return the (key, value) as a tuple from a string."""
    # Normally all properties look like this:
    #   Unique Identifier: 600508B1001CE4ACF473EE9C826230FF
    #   Disk Name: /dev/sda
    #   Mount Points: None
    key = ''
    value = ''
    try:
        key, value = string.split(': ')
    except ValueError:
        # This handles the case when the property of a logical drive
        # returned is as follows. Here we cannot split by ':' because
        # the disk id has colon in it. So if this is about disk,
        # then strip it accordingly.
        #   Mirror Group 0: physicaldrive 6I:1:5
        string = string.lstrip(' ')
        if string.startswith('physicaldrive'):
            fields = string.split(' ')
            # Include fields[1] to key to avoid duplicate pairs
            # with the same 'physicaldrive' key
            key = fields[0] + " " + fields[1]
            value = fields[1]
        else:
            # TODO(rameshg87): Check if this ever occurs.
            return string.strip(' '), None

    return key.strip(' '), value.strip(' ')
[ "def", "_get_key_value", "(", "string", ")", ":", "# Normally all properties look like this:", "# Unique Identifier: 600508B1001CE4ACF473EE9C826230FF", "# Disk Name: /dev/sda", "# Mount Points: None", "key", "=", "''", "value", "=", "''", "try", ":", "key", ",", "value", "=", "string", ".", "split", "(", "': '", ")", "except", "ValueError", ":", "# This handles the case when the property of a logical drive", "# returned is as follows. Here we cannot split by ':' because", "# the disk id has colon in it. So if this is about disk,", "# then strip it accordingly.", "# Mirror Group 0: physicaldrive 6I:1:5", "string", "=", "string", ".", "lstrip", "(", "' '", ")", "if", "string", ".", "startswith", "(", "'physicaldrive'", ")", ":", "fields", "=", "string", ".", "split", "(", "' '", ")", "# Include fields[1] to key to avoid duplicate pairs", "# with the same 'physicaldrive' key", "key", "=", "fields", "[", "0", "]", "+", "\" \"", "+", "fields", "[", "1", "]", "value", "=", "fields", "[", "1", "]", "else", ":", "# TODO(rameshg87): Check if this ever occurs.", "return", "string", ".", "strip", "(", "' '", ")", ",", "None", "return", "key", ".", "strip", "(", "' '", ")", ",", "value", ".", "strip", "(", "' '", ")" ]
Return the (key, value) as a tuple from a string.
[ "Return", "the", "(", "key", "value", ")", "as", "a", "tuple", "from", "a", "string", "." ]
python
train
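The two line shapes the parser in _get_key_value above distinguishes, reproduced with made-up hpssa-style strings: a normal 'key: value' property splits cleanly, while a bare physicaldrive line has no ': ' separator at all, so the unpacking raises ValueError and the fallback branch builds the pair itself.

line = 'Disk Name: /dev/sda'
key, value = line.split(': ')
assert (key, value) == ('Disk Name', '/dev/sda')

try:
    key, value = '   physicaldrive 6I:1:5'.split(': ')   # no ': ' anywhere, so 2-unpack fails
except ValueError:
    fields = 'physicaldrive 6I:1:5'.split(' ')
    key, value = fields[0] + ' ' + fields[1], fields[1]
assert (key, value) == ('physicaldrive 6I:1:5', '6I:1:5')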
msmbuilder/msmbuilder
basesetup.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/basesetup.py#L18-L30
def find_packages():
    """Find all of mdtraj's python packages.
    Adapted from IPython's setupbase.py. Copyright IPython
    contributors, licensed under the BSD license.
    """
    packages = ['mdtraj.scripts']
    for dir, subdirs, files in os.walk('MDTraj'):
        package = dir.replace(os.path.sep, '.')
        if '__init__.py' not in files:
            # not a package
            continue
        packages.append(package.replace('MDTraj', 'mdtraj'))
    return packages
[ "def", "find_packages", "(", ")", ":", "packages", "=", "[", "'mdtraj.scripts'", "]", "for", "dir", ",", "subdirs", ",", "files", "in", "os", ".", "walk", "(", "'MDTraj'", ")", ":", "package", "=", "dir", ".", "replace", "(", "os", ".", "path", ".", "sep", ",", "'.'", ")", "if", "'__init__.py'", "not", "in", "files", ":", "# not a package", "continue", "packages", ".", "append", "(", "package", ".", "replace", "(", "'MDTraj'", ",", "'mdtraj'", ")", ")", "return", "packages" ]
Find all of mdtraj's python packages. Adapted from IPython's setupbase.py. Copyright IPython contributors, licensed under the BSD license.
[ "Find", "all", "of", "mdtraj", "s", "python", "packages", ".", "Adapted", "from", "IPython", "s", "setupbase", ".", "py", ".", "Copyright", "IPython", "contributors", "licensed", "under", "the", "BSD", "license", "." ]
python
train
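The same os.walk() discovery pattern as find_packages above, written against a hypothetical 'mypkg' source tree rather than MDTraj (the function name discover_packages is invented here); directories without an __init__.py are skipped exactly as in the original.

import os

def discover_packages(root='mypkg'):
    packages = []
    for directory, subdirs, files in os.walk(root):
        if '__init__.py' not in files:
            continue  # not a package
        packages.append(directory.replace(os.path.sep, '.'))
    return packages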
angr/angr
angr/procedures/java_jni/__init__.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/procedures/java_jni/__init__.py#L79-L116
def _load_from_native_memory(self, addr, data_type=None, data_size=None,
                             no_of_elements=1, return_as_list=False):
    """
    Load from native memory.

    :param addr:            Native load address.
    :param data_type:       Java type of elements.
                            If set, all loaded elements are casted to this type.
    :param data_size:       Size of each element.
                            If not set, size is determined based on the given type.
    :param no_of_elements:  Number of elements to load.
    :param return_as_list:  Whether to wrap a single element in a list.
    :return:                The value or a list of loaded element(s).
    """
    # check if addr is symbolic
    if addr is not None and self.state.solver.symbolic(addr):
        raise NotImplementedError('Symbolic addresses are not supported.')

    # if data size is not set, derive it from the type
    if not data_size:
        if data_type:
            data_size = ArchSoot.sizeof[data_type]//8
        else:
            raise ValueError("Cannot determine the data size w/o a type.")

    native_memory_endness = self.state.arch.memory_endness

    # load elements
    values = []
    for i in range(no_of_elements):
        value = self.state.memory.load(addr + i*data_size, size=data_size,
                                       endness=native_memory_endness)
        if data_type:
            value = self.state.project.simos.cast_primitive(self.state,
                                                            value=value,
                                                            to_type=data_type)
        values.append(value)

    # return element(s)
    if no_of_elements == 1 and not return_as_list:
        return values[0]
    else:
        return values
[ "def", "_load_from_native_memory", "(", "self", ",", "addr", ",", "data_type", "=", "None", ",", "data_size", "=", "None", ",", "no_of_elements", "=", "1", ",", "return_as_list", "=", "False", ")", ":", "# check if addr is symbolic", "if", "addr", "is", "not", "None", "and", "self", ".", "state", ".", "solver", ".", "symbolic", "(", "addr", ")", ":", "raise", "NotImplementedError", "(", "'Symbolic addresses are not supported.'", ")", "# if data size is not set, derive it from the type", "if", "not", "data_size", ":", "if", "data_type", ":", "data_size", "=", "ArchSoot", ".", "sizeof", "[", "data_type", "]", "//", "8", "else", ":", "raise", "ValueError", "(", "\"Cannot determine the data size w/o a type.\"", ")", "native_memory_endness", "=", "self", ".", "state", ".", "arch", ".", "memory_endness", "# load elements", "values", "=", "[", "]", "for", "i", "in", "range", "(", "no_of_elements", ")", ":", "value", "=", "self", ".", "state", ".", "memory", ".", "load", "(", "addr", "+", "i", "*", "data_size", ",", "size", "=", "data_size", ",", "endness", "=", "native_memory_endness", ")", "if", "data_type", ":", "value", "=", "self", ".", "state", ".", "project", ".", "simos", ".", "cast_primitive", "(", "self", ".", "state", ",", "value", "=", "value", ",", "to_type", "=", "data_type", ")", "values", ".", "append", "(", "value", ")", "# return element(s)", "if", "no_of_elements", "==", "1", "and", "not", "return_as_list", ":", "return", "values", "[", "0", "]", "else", ":", "return", "values" ]
Load from native memory. :param addr: Native load address. :param data_type: Java type of elements. If set, all loaded elements are casted to this type. :param data_size: Size of each element. If not set, size is determined based on the given type. :param no_of_elements: Number of elements to load. :param return_as_list: Whether to wrap a single element in a list. :return: The value or a list of loaded element(s).
[ "Load", "from", "native", "memory", "." ]
python
train
LonamiWebs/Telethon
telethon/tl/custom/message.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/message.py#L846-L869
def _needed_markup_bot(self):
    """
    Returns the input peer of the bot that's needed for the reply markup.

    This is necessary for :tl:`KeyboardButtonSwitchInline` since we need
    to know what bot we want to start. Raises ``ValueError`` if the bot
    cannot be found but is needed. Returns ``None`` if it's not needed.
    """
    if not isinstance(self.reply_markup, (
            types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
        return None

    for row in self.reply_markup.rows:
        for button in row.buttons:
            if isinstance(button, types.KeyboardButtonSwitchInline):
                if button.same_peer:
                    bot = self.input_sender
                    if not bot:
                        raise ValueError('No input sender')
                else:
                    try:
                        return self._client._entity_cache[self.via_bot_id]
                    except KeyError:
                        raise ValueError('No input sender') from None
[ "def", "_needed_markup_bot", "(", "self", ")", ":", "if", "not", "isinstance", "(", "self", ".", "reply_markup", ",", "(", "types", ".", "ReplyInlineMarkup", ",", "types", ".", "ReplyKeyboardMarkup", ")", ")", ":", "return", "None", "for", "row", "in", "self", ".", "reply_markup", ".", "rows", ":", "for", "button", "in", "row", ".", "buttons", ":", "if", "isinstance", "(", "button", ",", "types", ".", "KeyboardButtonSwitchInline", ")", ":", "if", "button", ".", "same_peer", ":", "bot", "=", "self", ".", "input_sender", "if", "not", "bot", ":", "raise", "ValueError", "(", "'No input sender'", ")", "else", ":", "try", ":", "return", "self", ".", "_client", ".", "_entity_cache", "[", "self", ".", "via_bot_id", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'No input sender'", ")", "from", "None" ]
Returns the input peer of the bot that's needed for the reply markup. This is necessary for :tl:`KeyboardButtonSwitchInline` since we need to know what bot we want to start. Raises ``ValueError`` if the bot cannot be found but is needed. Returns ``None`` if it's not needed.
[ "Returns", "the", "input", "peer", "of", "the", "bot", "that", "s", "needed", "for", "the", "reply", "markup", "." ]
python
train
moderngl/moderngl
moderngl/context.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L639-L660
def texture3d(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'Texture3D': ''' Create a :py:class:`Texture3D` object. Args: size (tuple): The width, height and depth of the texture. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args: alignment (int): The byte alignment 1, 2, 4 or 8. dtype (str): Data type. Returns: :py:class:`Texture3D` object ''' res = Texture3D.__new__(Texture3D) res.mglo, res._glo = self.mglo.texture3d(size, components, data, alignment, dtype) res.ctx = self res.extra = None return res
[ "def", "texture3d", "(", "self", ",", "size", ",", "components", ",", "data", "=", "None", ",", "*", ",", "alignment", "=", "1", ",", "dtype", "=", "'f1'", ")", "->", "'Texture3D'", ":", "res", "=", "Texture3D", ".", "__new__", "(", "Texture3D", ")", "res", ".", "mglo", ",", "res", ".", "_glo", "=", "self", ".", "mglo", ".", "texture3d", "(", "size", ",", "components", ",", "data", ",", "alignment", ",", "dtype", ")", "res", ".", "ctx", "=", "self", "res", ".", "extra", "=", "None", "return", "res" ]
Create a :py:class:`Texture3D` object. Args: size (tuple): The width, height and depth of the texture. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args: alignment (int): The byte alignment 1, 2, 4 or 8. dtype (str): Data type. Returns: :py:class:`Texture3D` object
[ "Create", "a", ":", "py", ":", "class", ":", "Texture3D", "object", "." ]
python
train
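A minimal usage sketch for the texture3d record above, assuming a standalone context can be created; the 16x16x16 size and 4 components are illustrative values only:

import moderngl

ctx = moderngl.create_standalone_context()
volume = ctx.texture3d((16, 16, 16), 4)  # empty RGBA volume, default dtype 'f1'
print(volume.size, volume.components)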
KelSolaar/Umbra
umbra/ui/completers.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/ui/completers.py#L114-L125
def __set_cache(self, tokens): """ Sets the tokens cache. :param tokens: Completer tokens list. :type tokens: tuple or list """ if DefaultCompleter._DefaultCompleter__tokens.get(self.__language): return DefaultCompleter._DefaultCompleter__tokens[self.__language] = tokens
[ "def", "__set_cache", "(", "self", ",", "tokens", ")", ":", "if", "DefaultCompleter", ".", "_DefaultCompleter__tokens", ".", "get", "(", "self", ".", "__language", ")", ":", "return", "DefaultCompleter", ".", "_DefaultCompleter__tokens", "[", "self", ".", "__language", "]", "=", "tokens" ]
Sets the tokens cache. :param tokens: Completer tokens list. :type tokens: tuple or list
[ "Sets", "the", "tokens", "cache", "." ]
python
train
riga/tfdeploy
tfdeploy.py
https://github.com/riga/tfdeploy/blob/8481f657d6e3a51d76185a195b993e45f448828a/tfdeploy.py#L1831-L1836
def Max(a, axis, keep_dims): """ Max reduction op. """ return np.amax(a, axis=axis if not isinstance(axis, np.ndarray) else tuple(axis), keepdims=keep_dims),
[ "def", "Max", "(", "a", ",", "axis", ",", "keep_dims", ")", ":", "return", "np", ".", "amax", "(", "a", ",", "axis", "=", "axis", "if", "not", "isinstance", "(", "axis", ",", "np", ".", "ndarray", ")", "else", "tuple", "(", "axis", ")", ",", "keepdims", "=", "keep_dims", ")", "," ]
Max reduction op.
[ "Max", "reduction", "op", "." ]
python
train
gem/oq-engine
openquake/commonlib/oqvalidation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/oqvalidation.py#L468-L472
def lti(self): """ Dictionary extended_loss_type -> extended_loss_type index """ return {lt: i for i, (lt, dt) in enumerate(self.loss_dt_list())}
[ "def", "lti", "(", "self", ")", ":", "return", "{", "lt", ":", "i", "for", "i", ",", "(", "lt", ",", "dt", ")", "in", "enumerate", "(", "self", ".", "loss_dt_list", "(", ")", ")", "}" ]
Dictionary extended_loss_type -> extended_loss_type index
[ "Dictionary", "extended_loss_type", "-", ">", "extended_loss_type", "index" ]
python
train
mardix/Mocha
mocha/contrib/auth/models.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/models.py#L243-L254
def change_password(self, password): """ Change the password. :param password: :return: """ self.update(password_hash=self.encrypt_password(password), require_password_change=False) # Whenever the password is changed, reset the secret key to invalidate # any tokens in the wild self.reset_secret_key()
[ "def", "change_password", "(", "self", ",", "password", ")", ":", "self", ".", "update", "(", "password_hash", "=", "self", ".", "encrypt_password", "(", "password", ")", ",", "require_password_change", "=", "False", ")", "# Whenever the password is changed, reset the secret key to invalidate", "# any tokens in the wild", "self", ".", "reset_secret_key", "(", ")" ]
Change the password. :param password: :return:
[ "Change", "the", "password", ".", ":", "param", "password", ":", ":", "return", ":" ]
python
train
DataONEorg/d1_python
lib_client/src/d1_client/solr_client.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/solr_client.py#L501-L504
def _get_query(self, **query_dict): """Perform a GET query against Solr and return the response as a Python dict.""" param_dict = query_dict.copy() return self._send_query(do_post=False, **param_dict)
[ "def", "_get_query", "(", "self", ",", "*", "*", "query_dict", ")", ":", "param_dict", "=", "query_dict", ".", "copy", "(", ")", "return", "self", ".", "_send_query", "(", "do_post", "=", "False", ",", "*", "*", "param_dict", ")" ]
Perform a GET query against Solr and return the response as a Python dict.
[ "Perform", "a", "GET", "query", "against", "Solr", "and", "return", "the", "response", "as", "a", "Python", "dict", "." ]
python
train
moderngl/moderngl
moderngl/context.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L321-L405
def info(self) -> Dict[str, object]: ''' dict: Information about the context Example:: { 'GL_VENDOR': 'NVIDIA Corporation', 'GL_RENDERER': 'NVIDIA GeForce GT 650M OpenGL Engine', 'GL_VERSION': '4.1 NVIDIA-10.32.0 355.11.10.10.40.102', 'GL_POINT_SIZE_RANGE': (1.0, 2047.0), 'GL_SMOOTH_LINE_WIDTH_RANGE': (0.5, 1.0), 'GL_ALIASED_LINE_WIDTH_RANGE': (1.0, 1.0), 'GL_POINT_FADE_THRESHOLD_SIZE': 1.0, 'GL_POINT_SIZE_GRANULARITY': 0.125, 'GL_SMOOTH_LINE_WIDTH_GRANULARITY': 0.125, 'GL_MIN_PROGRAM_TEXEL_OFFSET': -8.0, 'GL_MAX_PROGRAM_TEXEL_OFFSET': 7.0, 'GL_MINOR_VERSION': 1, 'GL_MAJOR_VERSION': 4, 'GL_SAMPLE_BUFFERS': 0, 'GL_SUBPIXEL_BITS': 8, 'GL_CONTEXT_PROFILE_MASK': 1, 'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT': 256, 'GL_DOUBLEBUFFER': False, 'GL_STEREO': False, 'GL_MAX_VIEWPORT_DIMS': (16384, 16384), 'GL_MAX_3D_TEXTURE_SIZE': 2048, 'GL_MAX_ARRAY_TEXTURE_LAYERS': 2048, 'GL_MAX_CLIP_DISTANCES': 8, 'GL_MAX_COLOR_ATTACHMENTS': 8, 'GL_MAX_COLOR_TEXTURE_SAMPLES': 8, 'GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS': 233472, 'GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS': 231424, 'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS': 80, 'GL_MAX_COMBINED_UNIFORM_BLOCKS': 70, 'GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS': 233472, 'GL_MAX_CUBE_MAP_TEXTURE_SIZE': 16384, 'GL_MAX_DEPTH_TEXTURE_SAMPLES': 8, 'GL_MAX_DRAW_BUFFERS': 8, 'GL_MAX_DUAL_SOURCE_DRAW_BUFFERS': 1, 'GL_MAX_ELEMENTS_INDICES': 150000, 'GL_MAX_ELEMENTS_VERTICES': 1048575, 'GL_MAX_FRAGMENT_INPUT_COMPONENTS': 128, 'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS': 4096, 'GL_MAX_FRAGMENT_UNIFORM_VECTORS': 1024, 'GL_MAX_FRAGMENT_UNIFORM_BLOCKS': 14, 'GL_MAX_GEOMETRY_INPUT_COMPONENTS': 128, 'GL_MAX_GEOMETRY_OUTPUT_COMPONENTS': 128, 'GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS': 16, 'GL_MAX_GEOMETRY_UNIFORM_BLOCKS': 14, 'GL_MAX_GEOMETRY_UNIFORM_COMPONENTS': 2048, 'GL_MAX_INTEGER_SAMPLES': 1, 'GL_MAX_SAMPLES': 8, 'GL_MAX_RECTANGLE_TEXTURE_SIZE': 16384, 'GL_MAX_RENDERBUFFER_SIZE': 16384, 'GL_MAX_SAMPLE_MASK_WORDS': 1, 'GL_MAX_SERVER_WAIT_TIMEOUT': -1, 'GL_MAX_TEXTURE_BUFFER_SIZE': 134217728, 'GL_MAX_TEXTURE_IMAGE_UNITS': 16, 'GL_MAX_TEXTURE_LOD_BIAS': 15, 'GL_MAX_TEXTURE_SIZE': 16384, 'GL_MAX_UNIFORM_BUFFER_BINDINGS': 70, 'GL_MAX_UNIFORM_BLOCK_SIZE': 65536, 'GL_MAX_VARYING_COMPONENTS': 0, 'GL_MAX_VARYING_VECTORS': 31, 'GL_MAX_VARYING_FLOATS': 0, 'GL_MAX_VERTEX_ATTRIBS': 16, 'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS': 16, 'GL_MAX_VERTEX_UNIFORM_COMPONENTS': 4096, 'GL_MAX_VERTEX_UNIFORM_VECTORS': 1024, 'GL_MAX_VERTEX_OUTPUT_COMPONENTS': 128, 'GL_MAX_VERTEX_UNIFORM_BLOCKS': 14, 'GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET': 0, 'GL_MAX_VERTEX_ATTRIB_BINDINGS': 0, 'GL_VIEWPORT_BOUNDS_RANGE': (-32768, 32768), 'GL_VIEWPORT_SUBPIXEL_BITS': 0, 'GL_MAX_VIEWPORTS': 16 } ''' if self._info is None: self._info = self.mglo.info return self._info
[ "def", "info", "(", "self", ")", "->", "Dict", "[", "str", ",", "object", "]", ":", "if", "self", ".", "_info", "is", "None", ":", "self", ".", "_info", "=", "self", ".", "mglo", ".", "info", "return", "self", ".", "_info" ]
dict: Information about the context Example:: { 'GL_VENDOR': 'NVIDIA Corporation', 'GL_RENDERER': 'NVIDIA GeForce GT 650M OpenGL Engine', 'GL_VERSION': '4.1 NVIDIA-10.32.0 355.11.10.10.40.102', 'GL_POINT_SIZE_RANGE': (1.0, 2047.0), 'GL_SMOOTH_LINE_WIDTH_RANGE': (0.5, 1.0), 'GL_ALIASED_LINE_WIDTH_RANGE': (1.0, 1.0), 'GL_POINT_FADE_THRESHOLD_SIZE': 1.0, 'GL_POINT_SIZE_GRANULARITY': 0.125, 'GL_SMOOTH_LINE_WIDTH_GRANULARITY': 0.125, 'GL_MIN_PROGRAM_TEXEL_OFFSET': -8.0, 'GL_MAX_PROGRAM_TEXEL_OFFSET': 7.0, 'GL_MINOR_VERSION': 1, 'GL_MAJOR_VERSION': 4, 'GL_SAMPLE_BUFFERS': 0, 'GL_SUBPIXEL_BITS': 8, 'GL_CONTEXT_PROFILE_MASK': 1, 'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT': 256, 'GL_DOUBLEBUFFER': False, 'GL_STEREO': False, 'GL_MAX_VIEWPORT_DIMS': (16384, 16384), 'GL_MAX_3D_TEXTURE_SIZE': 2048, 'GL_MAX_ARRAY_TEXTURE_LAYERS': 2048, 'GL_MAX_CLIP_DISTANCES': 8, 'GL_MAX_COLOR_ATTACHMENTS': 8, 'GL_MAX_COLOR_TEXTURE_SAMPLES': 8, 'GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS': 233472, 'GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS': 231424, 'GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS': 80, 'GL_MAX_COMBINED_UNIFORM_BLOCKS': 70, 'GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS': 233472, 'GL_MAX_CUBE_MAP_TEXTURE_SIZE': 16384, 'GL_MAX_DEPTH_TEXTURE_SAMPLES': 8, 'GL_MAX_DRAW_BUFFERS': 8, 'GL_MAX_DUAL_SOURCE_DRAW_BUFFERS': 1, 'GL_MAX_ELEMENTS_INDICES': 150000, 'GL_MAX_ELEMENTS_VERTICES': 1048575, 'GL_MAX_FRAGMENT_INPUT_COMPONENTS': 128, 'GL_MAX_FRAGMENT_UNIFORM_COMPONENTS': 4096, 'GL_MAX_FRAGMENT_UNIFORM_VECTORS': 1024, 'GL_MAX_FRAGMENT_UNIFORM_BLOCKS': 14, 'GL_MAX_GEOMETRY_INPUT_COMPONENTS': 128, 'GL_MAX_GEOMETRY_OUTPUT_COMPONENTS': 128, 'GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS': 16, 'GL_MAX_GEOMETRY_UNIFORM_BLOCKS': 14, 'GL_MAX_GEOMETRY_UNIFORM_COMPONENTS': 2048, 'GL_MAX_INTEGER_SAMPLES': 1, 'GL_MAX_SAMPLES': 8, 'GL_MAX_RECTANGLE_TEXTURE_SIZE': 16384, 'GL_MAX_RENDERBUFFER_SIZE': 16384, 'GL_MAX_SAMPLE_MASK_WORDS': 1, 'GL_MAX_SERVER_WAIT_TIMEOUT': -1, 'GL_MAX_TEXTURE_BUFFER_SIZE': 134217728, 'GL_MAX_TEXTURE_IMAGE_UNITS': 16, 'GL_MAX_TEXTURE_LOD_BIAS': 15, 'GL_MAX_TEXTURE_SIZE': 16384, 'GL_MAX_UNIFORM_BUFFER_BINDINGS': 70, 'GL_MAX_UNIFORM_BLOCK_SIZE': 65536, 'GL_MAX_VARYING_COMPONENTS': 0, 'GL_MAX_VARYING_VECTORS': 31, 'GL_MAX_VARYING_FLOATS': 0, 'GL_MAX_VERTEX_ATTRIBS': 16, 'GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS': 16, 'GL_MAX_VERTEX_UNIFORM_COMPONENTS': 4096, 'GL_MAX_VERTEX_UNIFORM_VECTORS': 1024, 'GL_MAX_VERTEX_OUTPUT_COMPONENTS': 128, 'GL_MAX_VERTEX_UNIFORM_BLOCKS': 14, 'GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET': 0, 'GL_MAX_VERTEX_ATTRIB_BINDINGS': 0, 'GL_VIEWPORT_BOUNDS_RANGE': (-32768, 32768), 'GL_VIEWPORT_SUBPIXEL_BITS': 0, 'GL_MAX_VIEWPORTS': 16 }
[ "dict", ":", "Information", "about", "the", "context" ]
python
train
linkedin/naarad
src/naarad/httpdownload.py
https://github.com/linkedin/naarad/blob/261e2c0760fd6a6b0ee59064180bd8e3674311fe/src/naarad/httpdownload.py#L141-L159
def download_url_single(inputs, outdir, outfile=None): """ Downloads a http(s) url to a local file :param str inputs: the absolute url :param str outdir: Required. the local directory to put the downloadedfiles. :param str outfile: // Optional. If this is given, the downloaded url will be renated to outfile; If this is not given, then the local file will be the original one, as given in url. :return: the local full path name of downloaded url """ if not inputs or type(inputs) != str or not outdir or type(outdir) != str: logging.error("The call parameters are invalid.") return else: if not os.path.exists(outdir): os.makedirs(outdir) output_file = handle_single_url(inputs, outdir, outfile) return output_file
[ "def", "download_url_single", "(", "inputs", ",", "outdir", ",", "outfile", "=", "None", ")", ":", "if", "not", "inputs", "or", "type", "(", "inputs", ")", "!=", "str", "or", "not", "outdir", "or", "type", "(", "outdir", ")", "!=", "str", ":", "logging", ".", "error", "(", "\"The call parameters are invalid.\"", ")", "return", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "makedirs", "(", "outdir", ")", "output_file", "=", "handle_single_url", "(", "inputs", ",", "outdir", ",", "outfile", ")", "return", "output_file" ]
Downloads an http(s) url to a local file :param str inputs: the absolute url :param str outdir: Required. The local directory to put the downloaded files. :param str outfile: Optional. If this is given, the downloaded url will be renamed to outfile; if this is not given, then the local file will keep the original name, as given in url. :return: the local full path name of the downloaded url
[ "Downloads", "a", "http", "(", "s", ")", "url", "to", "a", "local", "file", ":", "param", "str", "inputs", ":", "the", "absolute", "url", ":", "param", "str", "outdir", ":", "Required", ".", "the", "local", "directory", "to", "put", "the", "downloadedfiles", ".", ":", "param", "str", "outfile", ":", "//", "Optional", ".", "If", "this", "is", "given", "the", "downloaded", "url", "will", "be", "renated", "to", "outfile", ";", "If", "this", "is", "not", "given", "then", "the", "local", "file", "will", "be", "the", "original", "one", "as", "given", "in", "url", ".", ":", "return", ":", "the", "local", "full", "path", "name", "of", "downloaded", "url" ]
python
valid
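A hedged usage sketch for download_url_single above; the module path is assumed from the record's file path, and the URL and output directory are placeholders:

from naarad.httpdownload import download_url_single

# returns the full local path of the downloaded file, or None on invalid arguments
local_path = download_url_single("http://example.com/metrics.csv", "/tmp/naarad_out")
print(local_path)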
urbn/Caesium
caesium/handler.py
https://github.com/urbn/Caesium/blob/2a14fe79724c38fe9a1b20f7b8f518f8c6d50df1/caesium/handler.py#L716-L733
def delete(self, bulk_id): """Update many objects with a single toa :param str bulk_id: The bulk id for the job you want to delete """ collection_name = self.request.headers.get("collection") if not collection_name: self.raise_error(400, "Missing a collection name header") self.revisions = BaseAsyncMotorDocument("%s_revisions" % collection_name) self.logger.info("Deleting revisions with bulk_id %s" % (bulk_id)) result = yield self.revisions.collection.remove({"meta.bulk_id": bulk_id}) self.write(result)
[ "def", "delete", "(", "self", ",", "bulk_id", ")", ":", "collection_name", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "\"collection\"", ")", "if", "not", "collection_name", ":", "self", ".", "raise_error", "(", "400", ",", "\"Missing a collection name header\"", ")", "self", ".", "revisions", "=", "BaseAsyncMotorDocument", "(", "\"%s_revisions\"", "%", "collection_name", ")", "self", ".", "logger", ".", "info", "(", "\"Deleting revisions with bulk_id %s\"", "%", "(", "bulk_id", ")", ")", "result", "=", "yield", "self", ".", "revisions", ".", "collection", ".", "remove", "(", "{", "\"meta.bulk_id\"", ":", "bulk_id", "}", ")", "self", ".", "write", "(", "result", ")" ]
Delete many revision objects that share a single bulk id :param str bulk_id: The bulk id for the job you want to delete
[ "Update", "many", "objects", "with", "a", "single", "toa" ]
python
train
senaite/senaite.core
bika/lims/workflow/worksheet/guards.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/workflow/worksheet/guards.py#L60-L75
def guard_retract(worksheet): """Return whether the transition retract can be performed or not to the worksheet passed in. Since the retract transition from worksheet is a shortcut to retract transitions from all analyses the worksheet contains, this guard only returns True if retract transition is allowed for all analyses the worksheet contains """ analyses = worksheet.getAnalyses() detached = ['rejected', 'retracted'] num_detached = 0 for analysis in analyses: if api.get_workflow_status_of(analysis) in detached: num_detached += 1 elif not isTransitionAllowed(analysis, "retract"): return False return analyses and num_detached < len(analyses) or False
[ "def", "guard_retract", "(", "worksheet", ")", ":", "analyses", "=", "worksheet", ".", "getAnalyses", "(", ")", "detached", "=", "[", "'rejected'", ",", "'retracted'", "]", "num_detached", "=", "0", "for", "analysis", "in", "analyses", ":", "if", "api", ".", "get_workflow_status_of", "(", "analysis", ")", "in", "detached", ":", "num_detached", "+=", "1", "elif", "not", "isTransitionAllowed", "(", "analysis", ",", "\"retract\"", ")", ":", "return", "False", "return", "analyses", "and", "num_detached", "<", "len", "(", "analyses", ")", "or", "False" ]
Return whether the retract transition can be performed or not on the worksheet passed in. Since the retract transition from a worksheet is a shortcut to retract transitions from all analyses the worksheet contains, this guard only returns True if the retract transition is allowed for all analyses the worksheet contains
[ "Return", "whether", "the", "transition", "retract", "can", "be", "performed", "or", "not", "to", "the", "worksheet", "passed", "in", ".", "Since", "the", "retract", "transition", "from", "worksheet", "is", "a", "shortcut", "to", "retract", "transitions", "from", "all", "analyses", "the", "worksheet", "contains", "this", "guard", "only", "returns", "True", "if", "retract", "transition", "is", "allowed", "for", "all", "analyses", "the", "worksheet", "contains" ]
python
train
ejeschke/ginga
ginga/rv/plugins/MultiDim.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/MultiDim.py#L444-L484
def set_naxis(self, idx, n): """Change the slice shown in the channel viewer. `idx` is the slice index (0-based); `n` is the axis (0-based) """ self.play_idx = idx self.logger.debug("naxis %d index is %d" % (n + 1, idx + 1)) image = self.fitsimage.get_image() slidername = 'choose_naxis%d' % (n + 1) try: if image is None: raise ValueError("Please load an image cube") m = n - 2 self.naxispath[m] = idx self.logger.debug("m=%d naxispath=%s" % (m, str(self.naxispath))) image.set_naxispath(self.naxispath) self.logger.debug("NAXIS%d slice %d loaded." % (n + 1, idx + 1)) if self.play_indices: # save play index self.play_indices[m] = idx text = [i + 1 for i in self.naxispath] if slidername in self.w: self.w[slidername].set_value(text[m]) else: text = idx + 1 if slidername in self.w: self.w[slidername].set_value(text) self.w.slice.set_text(str(text)) # schedule a redraw self.fitsimage.redraw(whence=0) except Exception as e: errmsg = "Error loading NAXIS%d slice %d: %s" % ( n + 1, idx + 1, str(e)) self.logger.error(errmsg) self.fv.error(errmsg)
[ "def", "set_naxis", "(", "self", ",", "idx", ",", "n", ")", ":", "self", ".", "play_idx", "=", "idx", "self", ".", "logger", ".", "debug", "(", "\"naxis %d index is %d\"", "%", "(", "n", "+", "1", ",", "idx", "+", "1", ")", ")", "image", "=", "self", ".", "fitsimage", ".", "get_image", "(", ")", "slidername", "=", "'choose_naxis%d'", "%", "(", "n", "+", "1", ")", "try", ":", "if", "image", "is", "None", ":", "raise", "ValueError", "(", "\"Please load an image cube\"", ")", "m", "=", "n", "-", "2", "self", ".", "naxispath", "[", "m", "]", "=", "idx", "self", ".", "logger", ".", "debug", "(", "\"m=%d naxispath=%s\"", "%", "(", "m", ",", "str", "(", "self", ".", "naxispath", ")", ")", ")", "image", ".", "set_naxispath", "(", "self", ".", "naxispath", ")", "self", ".", "logger", ".", "debug", "(", "\"NAXIS%d slice %d loaded.\"", "%", "(", "n", "+", "1", ",", "idx", "+", "1", ")", ")", "if", "self", ".", "play_indices", ":", "# save play index", "self", ".", "play_indices", "[", "m", "]", "=", "idx", "text", "=", "[", "i", "+", "1", "for", "i", "in", "self", ".", "naxispath", "]", "if", "slidername", "in", "self", ".", "w", ":", "self", ".", "w", "[", "slidername", "]", ".", "set_value", "(", "text", "[", "m", "]", ")", "else", ":", "text", "=", "idx", "+", "1", "if", "slidername", "in", "self", ".", "w", ":", "self", ".", "w", "[", "slidername", "]", ".", "set_value", "(", "text", ")", "self", ".", "w", ".", "slice", ".", "set_text", "(", "str", "(", "text", ")", ")", "# schedule a redraw", "self", ".", "fitsimage", ".", "redraw", "(", "whence", "=", "0", ")", "except", "Exception", "as", "e", ":", "errmsg", "=", "\"Error loading NAXIS%d slice %d: %s\"", "%", "(", "n", "+", "1", ",", "idx", "+", "1", ",", "str", "(", "e", ")", ")", "self", ".", "logger", ".", "error", "(", "errmsg", ")", "self", ".", "fv", ".", "error", "(", "errmsg", ")" ]
Change the slice shown in the channel viewer. `idx` is the slice index (0-based); `n` is the axis (0-based)
[ "Change", "the", "slice", "shown", "in", "the", "channel", "viewer", ".", "idx", "is", "the", "slice", "index", "(", "0", "-", "based", ")", ";", "n", "is", "the", "axis", "(", "0", "-", "based", ")" ]
python
train
glue-viz/echo
echo/core.py
https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L334-L372
def add_callback(instance, prop, callback, echo_old=False, priority=0): """ Attach a callback function to a property in an instance Parameters ---------- instance The instance to add the callback to prop : str Name of callback property in `instance` callback : func The callback function to add echo_old : bool, optional If `True`, the callback function will be invoked with both the old and new values of the property, as ``func(old, new)``. If `False` (the default), will be invoked as ``func(new)`` priority : int, optional This can optionally be used to force a certain order of execution of callbacks (larger values indicate a higher priority). Examples -------- :: class Foo: bar = CallbackProperty(0) def callback(value): pass f = Foo() add_callback(f, 'bar', callback) """ p = getattr(type(instance), prop) if not isinstance(p, CallbackProperty): raise TypeError("%s is not a CallbackProperty" % prop) p.add_callback(instance, callback, echo_old=echo_old, priority=priority)
[ "def", "add_callback", "(", "instance", ",", "prop", ",", "callback", ",", "echo_old", "=", "False", ",", "priority", "=", "0", ")", ":", "p", "=", "getattr", "(", "type", "(", "instance", ")", ",", "prop", ")", "if", "not", "isinstance", "(", "p", ",", "CallbackProperty", ")", ":", "raise", "TypeError", "(", "\"%s is not a CallbackProperty\"", "%", "prop", ")", "p", ".", "add_callback", "(", "instance", ",", "callback", ",", "echo_old", "=", "echo_old", ",", "priority", "=", "priority", ")" ]
Attach a callback function to a property in an instance Parameters ---------- instance The instance to add the callback to prop : str Name of callback property in `instance` callback : func The callback function to add echo_old : bool, optional If `True`, the callback function will be invoked with both the old and new values of the property, as ``func(old, new)``. If `False` (the default), will be invoked as ``func(new)`` priority : int, optional This can optionally be used to force a certain order of execution of callbacks (larger values indicate a higher priority). Examples -------- :: class Foo: bar = CallbackProperty(0) def callback(value): pass f = Foo() add_callback(f, 'bar', callback)
[ "Attach", "a", "callback", "function", "to", "a", "property", "in", "an", "instance" ]
python
train
Valuehorizon/valuehorizon-companies
companies/models.py
https://github.com/Valuehorizon/valuehorizon-companies/blob/5366e230da69ee30fcdc1bf4beddc99310f6b767/companies/models.py#L266-L274
def cache_data(self): """ Cache some basic data such as financial statement metrics """ # Set Slug if not set if not self.slug_name: self.slug_name = slugify(self.name).strip() if len(self.slug_name) > 255: self.slug_name = self.slug_name[0:254]
[ "def", "cache_data", "(", "self", ")", ":", "# Set Slug if not set", "if", "not", "self", ".", "slug_name", ":", "self", ".", "slug_name", "=", "slugify", "(", "self", ".", "name", ")", ".", "strip", "(", ")", "if", "len", "(", "self", ".", "slug_name", ")", ">", "255", ":", "self", ".", "slug_name", "=", "self", ".", "slug_name", "[", "0", ":", "254", "]" ]
Cache some basic data such as financial statement metrics
[ "Cache", "some", "basic", "data", "such", "as", "financial", "statement", "metrics" ]
python
train
mikusjelly/apkutils
apkutils/apkfile.py
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1417-L1519
def write(self, filename, arcname=None, compress_type=None): """Put the bytes from filename into the archive under the name arcname.""" if not self.fp: raise RuntimeError( "Attempt to write to ZIP archive that was already closed") st = os.stat(filename) isdir = stat.S_ISDIR(st.st_mode) mtime = time.localtime(st.st_mtime) date_time = mtime[0:6] # Create ZipInfo instance to store file information if arcname is None: arcname = filename arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) while arcname[0] in (os.sep, os.altsep): arcname = arcname[1:] if isdir: arcname += '/' zinfo = ZipInfo(arcname, date_time) zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes if compress_type is None: zinfo.compress_type = self.compression else: zinfo.compress_type = compress_type zinfo.file_size = st.st_size zinfo.flag_bits = 0x00 with self._lock: if self._seekable: self.fp.seek(self.start_dir) zinfo.header_offset = self.fp.tell() # Start of header bytes if zinfo.compress_type == ZIP_LZMA: # Compressed data includes an end-of-stream (EOS) marker zinfo.flag_bits |= 0x02 self._writecheck(zinfo) self._didModify = True if isdir: zinfo.file_size = 0 zinfo.compress_size = 0 zinfo.CRC = 0 zinfo.external_attr |= 0x10 # MS-DOS directory flag self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo self.fp.write(zinfo.FileHeader(False)) self.start_dir = self.fp.tell() return cmpr = _get_compressor(zinfo.compress_type) if not self._seekable: zinfo.flag_bits |= 0x08 with open(filename, "rb") as fp: # Must overwrite CRC and sizes with correct data later zinfo.CRC = CRC = 0 zinfo.compress_size = compress_size = 0 # Compressed size can be larger than uncompressed size zip64 = self._allowZip64 and \ zinfo.file_size * 1.05 > ZIP64_LIMIT self.fp.write(zinfo.FileHeader(zip64)) file_size = 0 while 1: buf = fp.read(1024 * 8) if not buf: break file_size = file_size + len(buf) CRC = crc32(buf, CRC) & 0xffffffff if cmpr: buf = cmpr.compress(buf) compress_size = compress_size + len(buf) self.fp.write(buf) if cmpr: buf = cmpr.flush() compress_size = compress_size + len(buf) self.fp.write(buf) zinfo.compress_size = compress_size else: zinfo.compress_size = file_size zinfo.CRC = CRC zinfo.file_size = file_size if zinfo.flag_bits & 0x08: # Write CRC and file sizes after the file data fmt = '<LQQ' if zip64 else '<LLL' self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size, zinfo.file_size)) self.start_dir = self.fp.tell() else: if not zip64 and self._allowZip64: if file_size > ZIP64_LIMIT: raise RuntimeError( 'File size has increased during compressing') if compress_size > ZIP64_LIMIT: raise RuntimeError( 'Compressed size larger than uncompressed size') # Seek backwards and write file header (which will now include # correct CRC and file sizes) self.start_dir = self.fp.tell() # Preserve current position in file self.fp.seek(zinfo.header_offset) self.fp.write(zinfo.FileHeader(zip64)) self.fp.seek(self.start_dir) self.filelist.append(zinfo) self.NameToInfo[zinfo.filename] = zinfo
[ "def", "write", "(", "self", ",", "filename", ",", "arcname", "=", "None", ",", "compress_type", "=", "None", ")", ":", "if", "not", "self", ".", "fp", ":", "raise", "RuntimeError", "(", "\"Attempt to write to ZIP archive that was already closed\"", ")", "st", "=", "os", ".", "stat", "(", "filename", ")", "isdir", "=", "stat", ".", "S_ISDIR", "(", "st", ".", "st_mode", ")", "mtime", "=", "time", ".", "localtime", "(", "st", ".", "st_mtime", ")", "date_time", "=", "mtime", "[", "0", ":", "6", "]", "# Create ZipInfo instance to store file information", "if", "arcname", "is", "None", ":", "arcname", "=", "filename", "arcname", "=", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "splitdrive", "(", "arcname", ")", "[", "1", "]", ")", "while", "arcname", "[", "0", "]", "in", "(", "os", ".", "sep", ",", "os", ".", "altsep", ")", ":", "arcname", "=", "arcname", "[", "1", ":", "]", "if", "isdir", ":", "arcname", "+=", "'/'", "zinfo", "=", "ZipInfo", "(", "arcname", ",", "date_time", ")", "zinfo", ".", "external_attr", "=", "(", "st", "[", "0", "]", "&", "0xFFFF", ")", "<<", "16", "# Unix attributes", "if", "compress_type", "is", "None", ":", "zinfo", ".", "compress_type", "=", "self", ".", "compression", "else", ":", "zinfo", ".", "compress_type", "=", "compress_type", "zinfo", ".", "file_size", "=", "st", ".", "st_size", "zinfo", ".", "flag_bits", "=", "0x00", "with", "self", ".", "_lock", ":", "if", "self", ".", "_seekable", ":", "self", ".", "fp", ".", "seek", "(", "self", ".", "start_dir", ")", "zinfo", ".", "header_offset", "=", "self", ".", "fp", ".", "tell", "(", ")", "# Start of header bytes", "if", "zinfo", ".", "compress_type", "==", "ZIP_LZMA", ":", "# Compressed data includes an end-of-stream (EOS) marker", "zinfo", ".", "flag_bits", "|=", "0x02", "self", ".", "_writecheck", "(", "zinfo", ")", "self", ".", "_didModify", "=", "True", "if", "isdir", ":", "zinfo", ".", "file_size", "=", "0", "zinfo", ".", "compress_size", "=", "0", "zinfo", ".", "CRC", "=", "0", "zinfo", ".", "external_attr", "|=", "0x10", "# MS-DOS directory flag", "self", ".", "filelist", ".", "append", "(", "zinfo", ")", "self", ".", "NameToInfo", "[", "zinfo", ".", "filename", "]", "=", "zinfo", "self", ".", "fp", ".", "write", "(", "zinfo", ".", "FileHeader", "(", "False", ")", ")", "self", ".", "start_dir", "=", "self", ".", "fp", ".", "tell", "(", ")", "return", "cmpr", "=", "_get_compressor", "(", "zinfo", ".", "compress_type", ")", "if", "not", "self", ".", "_seekable", ":", "zinfo", ".", "flag_bits", "|=", "0x08", "with", "open", "(", "filename", ",", "\"rb\"", ")", "as", "fp", ":", "# Must overwrite CRC and sizes with correct data later", "zinfo", ".", "CRC", "=", "CRC", "=", "0", "zinfo", ".", "compress_size", "=", "compress_size", "=", "0", "# Compressed size can be larger than uncompressed size", "zip64", "=", "self", ".", "_allowZip64", "and", "zinfo", ".", "file_size", "*", "1.05", ">", "ZIP64_LIMIT", "self", ".", "fp", ".", "write", "(", "zinfo", ".", "FileHeader", "(", "zip64", ")", ")", "file_size", "=", "0", "while", "1", ":", "buf", "=", "fp", ".", "read", "(", "1024", "*", "8", ")", "if", "not", "buf", ":", "break", "file_size", "=", "file_size", "+", "len", "(", "buf", ")", "CRC", "=", "crc32", "(", "buf", ",", "CRC", ")", "&", "0xffffffff", "if", "cmpr", ":", "buf", "=", "cmpr", ".", "compress", "(", "buf", ")", "compress_size", "=", "compress_size", "+", "len", "(", "buf", ")", "self", ".", "fp", ".", "write", "(", "buf", ")", "if", "cmpr", ":", "buf", "=", "cmpr", ".", "flush", 
"(", ")", "compress_size", "=", "compress_size", "+", "len", "(", "buf", ")", "self", ".", "fp", ".", "write", "(", "buf", ")", "zinfo", ".", "compress_size", "=", "compress_size", "else", ":", "zinfo", ".", "compress_size", "=", "file_size", "zinfo", ".", "CRC", "=", "CRC", "zinfo", ".", "file_size", "=", "file_size", "if", "zinfo", ".", "flag_bits", "&", "0x08", ":", "# Write CRC and file sizes after the file data", "fmt", "=", "'<LQQ'", "if", "zip64", "else", "'<LLL'", "self", ".", "fp", ".", "write", "(", "struct", ".", "pack", "(", "fmt", ",", "zinfo", ".", "CRC", ",", "zinfo", ".", "compress_size", ",", "zinfo", ".", "file_size", ")", ")", "self", ".", "start_dir", "=", "self", ".", "fp", ".", "tell", "(", ")", "else", ":", "if", "not", "zip64", "and", "self", ".", "_allowZip64", ":", "if", "file_size", ">", "ZIP64_LIMIT", ":", "raise", "RuntimeError", "(", "'File size has increased during compressing'", ")", "if", "compress_size", ">", "ZIP64_LIMIT", ":", "raise", "RuntimeError", "(", "'Compressed size larger than uncompressed size'", ")", "# Seek backwards and write file header (which will now include", "# correct CRC and file sizes)", "self", ".", "start_dir", "=", "self", ".", "fp", ".", "tell", "(", ")", "# Preserve current position in file", "self", ".", "fp", ".", "seek", "(", "zinfo", ".", "header_offset", ")", "self", ".", "fp", ".", "write", "(", "zinfo", ".", "FileHeader", "(", "zip64", ")", ")", "self", ".", "fp", ".", "seek", "(", "self", ".", "start_dir", ")", "self", ".", "filelist", ".", "append", "(", "zinfo", ")", "self", ".", "NameToInfo", "[", "zinfo", ".", "filename", "]", "=", "zinfo" ]
Put the bytes from filename into the archive under the name arcname.
[ "Put", "the", "bytes", "from", "filename", "into", "the", "archive", "under", "the", "name", "arcname", "." ]
python
train
daler/metaseq
metaseq/_genomic_signal.py
https://github.com/daler/metaseq/blob/fa875d1f72317aa7ef95cb128b739956b16eef9f/metaseq/_genomic_signal.py#L50-L70
def genomic_signal(fn, kind): """ Factory function that makes the right class for the file format. Typically you'll only need this function to create a new genomic signal object. :param fn: Filename :param kind: String. Format of the file; see metaseq.genomic_signal._registry.keys() """ try: klass = _registry[kind.lower()] except KeyError: raise ValueError( 'No support for %s format, choices are %s' % (kind, _registry.keys())) m = klass(fn) m.kind = kind return m
[ "def", "genomic_signal", "(", "fn", ",", "kind", ")", ":", "try", ":", "klass", "=", "_registry", "[", "kind", ".", "lower", "(", ")", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'No support for %s format, choices are %s'", "%", "(", "kind", ",", "_registry", ".", "keys", "(", ")", ")", ")", "m", "=", "klass", "(", "fn", ")", "m", ".", "kind", "=", "kind", "return", "m" ]
Factory function that makes the right class for the file format. Typically you'll only need this function to create a new genomic signal object. :param fn: Filename :param kind: String. Format of the file; see metaseq.genomic_signal._registry.keys()
[ "Factory", "function", "that", "makes", "the", "right", "class", "for", "the", "file", "format", "." ]
python
train
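A short usage sketch for the genomic_signal factory above, assuming the function is re-exported at the metaseq package level and that 'bam' is one of the registered kinds; 'reads.bam' is a placeholder filename:

import metaseq

sig = metaseq.genomic_signal('reads.bam', 'bam')  # picks the BAM-backed class from the registry
print(sig.kind)  # 'bam'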
Kitware/tangelo
tangelo/tangelo/pkgdata/plugin/watch/python/__init__.py
https://github.com/Kitware/tangelo/blob/470034ee9b3d7a01becc1ce5fddc7adc1d5263ef/tangelo/tangelo/pkgdata/plugin/watch/python/__init__.py#L55-L67
def module_getmtime(filename): """ Get the mtime associated with a module. If this is a .pyc or .pyo file and a corresponding .py file exists, the time of the .py file is returned. :param filename: filename of the module. :returns: mtime or None if the file doesn"t exist. """ if os.path.splitext(filename)[1].lower() in (".pyc", ".pyo") and os.path.exists(filename[:-1]): return os.path.getmtime(filename[:-1]) if os.path.exists(filename): return os.path.getmtime(filename) return None
[ "def", "module_getmtime", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "1", "]", ".", "lower", "(", ")", "in", "(", "\".pyc\"", ",", "\".pyo\"", ")", "and", "os", ".", "path", ".", "exists", "(", "filename", "[", ":", "-", "1", "]", ")", ":", "return", "os", ".", "path", ".", "getmtime", "(", "filename", "[", ":", "-", "1", "]", ")", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "return", "os", ".", "path", ".", "getmtime", "(", "filename", ")", "return", "None" ]
Get the mtime associated with a module. If this is a .pyc or .pyo file and a corresponding .py file exists, the time of the .py file is returned. :param filename: filename of the module. :returns: mtime or None if the file doesn't exist.
[ "Get", "the", "mtime", "associated", "with", "a", "module", ".", "If", "this", "is", "a", ".", "pyc", "or", ".", "pyo", "file", "and", "a", "corresponding", ".", "py", "file", "exists", "the", "time", "of", "the", ".", "py", "file", "is", "returned", "." ]
python
train
pybel/pybel
src/pybel/struct/filters/node_predicate_builders.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/node_predicate_builders.py#L24-L35
def function_inclusion_filter_builder(func: Strings) -> NodePredicate: """Build a filter that only passes on nodes of the given function(s). :param func: A BEL Function or list/set/tuple of BEL functions """ if isinstance(func, str): return _single_function_inclusion_filter_builder(func) elif isinstance(func, Iterable): return _collection_function_inclusion_builder(func) raise TypeError('Invalid type for argument: {}'.format(func))
[ "def", "function_inclusion_filter_builder", "(", "func", ":", "Strings", ")", "->", "NodePredicate", ":", "if", "isinstance", "(", "func", ",", "str", ")", ":", "return", "_single_function_inclusion_filter_builder", "(", "func", ")", "elif", "isinstance", "(", "func", ",", "Iterable", ")", ":", "return", "_collection_function_inclusion_builder", "(", "func", ")", "raise", "TypeError", "(", "'Invalid type for argument: {}'", ".", "format", "(", "func", ")", ")" ]
Build a filter that only passes on nodes of the given function(s). :param func: A BEL Function or list/set/tuple of BEL functions
[ "Build", "a", "filter", "that", "only", "passes", "on", "nodes", "of", "the", "given", "function", "(", "s", ")", "." ]
python
train
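A sketch of how the builder above might be used; it assumes the BELGraph and dsl APIs of the same PyBEL version, that 'Protein' matches pybel.constants.PROTEIN, and that the resulting predicate is called as predicate(graph, node), which the record itself does not show:

from pybel import BELGraph
from pybel.dsl import Protein
from pybel.struct.filters.node_predicate_builders import function_inclusion_filter_builder

graph = BELGraph()
node = Protein(namespace='HGNC', name='TP53')
graph.add_node_from_data(node)

is_protein = function_inclusion_filter_builder('Protein')
print(is_protein(graph, node))  # expected True under the assumptions above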
mental32/spotify.py
spotify/models/player.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/player.py#L111-L122
async def set_repeat(self, state, *, device: Optional[SomeDevice] = None): """Set the repeat mode for the user’s playback. Parameters ---------- state : str Options are repeat-track, repeat-context, and off device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target. """ await self._user.http.repeat_playback(state, device_id=str(device))
[ "async", "def", "set_repeat", "(", "self", ",", "state", ",", "*", ",", "device", ":", "Optional", "[", "SomeDevice", "]", "=", "None", ")", ":", "await", "self", ".", "_user", ".", "http", ".", "repeat_playback", "(", "state", ",", "device_id", "=", "str", "(", "device", ")", ")" ]
Set the repeat mode for the user’s playback. Parameters ---------- state : str Options are repeat-track, repeat-context, and off device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
[ "Set", "the", "repeat", "mode", "for", "the", "user’s", "playback", "." ]
python
test
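A hedged sketch of calling set_repeat above; 'player' stands in for the Player instance the method belongs to, and the coroutine must be awaited inside a running event loop:

async def turn_off_repeat(player):
    # valid states per the docstring: 'repeat-track', 'repeat-context', 'off'
    await player.set_repeat('off')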
markovmodel/PyEMMA
pyemma/_base/serialization/serialization.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_base/serialization/serialization.py#L212-L249
def save(self, file_name, model_name='default', overwrite=False, save_streaming_chain=False): r""" saves the current state of this object to given file and name. Parameters ----------- file_name: str path to desired output file model_name: str, default='default' creates a group named 'model_name' in the given file, which will contain all of the data. If the name already exists, and overwrite is False (default) will raise a RuntimeError. overwrite: bool, default=False Should overwrite existing model names? save_streaming_chain : boolean, default=False if True, the data_producer(s) of this object will also be saved in the given file. Examples -------- >>> import pyemma, numpy as np >>> from pyemma.util.contexts import named_temporary_file >>> m = pyemma.msm.MSM(P=np.array([[0.1, 0.9], [0.9, 0.1]])) >>> with named_temporary_file() as file: # doctest: +SKIP ... m.save(file, 'simple') # doctest: +SKIP ... inst_restored = pyemma.load(file, 'simple') # doctest: +SKIP >>> np.testing.assert_equal(m.P, inst_restored.P) # doctest: +SKIP """ from pyemma._base.serialization.h5file import H5File try: with H5File(file_name=file_name, mode='a') as f: f.add_serializable(model_name, obj=self, overwrite=overwrite, save_streaming_chain=save_streaming_chain) except Exception as e: msg = ('During saving the object {obj}") ' 'the following error occurred: {error}'.format(obj=self, error=e)) if isinstance(self, Loggable): self.logger.exception(msg) else: logger.exception(msg) raise
[ "def", "save", "(", "self", ",", "file_name", ",", "model_name", "=", "'default'", ",", "overwrite", "=", "False", ",", "save_streaming_chain", "=", "False", ")", ":", "from", "pyemma", ".", "_base", ".", "serialization", ".", "h5file", "import", "H5File", "try", ":", "with", "H5File", "(", "file_name", "=", "file_name", ",", "mode", "=", "'a'", ")", "as", "f", ":", "f", ".", "add_serializable", "(", "model_name", ",", "obj", "=", "self", ",", "overwrite", "=", "overwrite", ",", "save_streaming_chain", "=", "save_streaming_chain", ")", "except", "Exception", "as", "e", ":", "msg", "=", "(", "'During saving the object {obj}\") '", "'the following error occurred: {error}'", ".", "format", "(", "obj", "=", "self", ",", "error", "=", "e", ")", ")", "if", "isinstance", "(", "self", ",", "Loggable", ")", ":", "self", ".", "logger", ".", "exception", "(", "msg", ")", "else", ":", "logger", ".", "exception", "(", "msg", ")", "raise" ]
r""" saves the current state of this object to given file and name. Parameters ----------- file_name: str path to desired output file model_name: str, default='default' creates a group named 'model_name' in the given file, which will contain all of the data. If the name already exists, and overwrite is False (default) will raise a RuntimeError. overwrite: bool, default=False Should overwrite existing model names? save_streaming_chain : boolean, default=False if True, the data_producer(s) of this object will also be saved in the given file. Examples -------- >>> import pyemma, numpy as np >>> from pyemma.util.contexts import named_temporary_file >>> m = pyemma.msm.MSM(P=np.array([[0.1, 0.9], [0.9, 0.1]])) >>> with named_temporary_file() as file: # doctest: +SKIP ... m.save(file, 'simple') # doctest: +SKIP ... inst_restored = pyemma.load(file, 'simple') # doctest: +SKIP >>> np.testing.assert_equal(m.P, inst_restored.P) # doctest: +SKIP
[ "r", "saves", "the", "current", "state", "of", "this", "object", "to", "given", "file", "and", "name", "." ]
python
train
genepattern/nbtools
nbtools/jsobject/jsobject.py
https://github.com/genepattern/nbtools/blob/2f74703f59926d8565f9714b1458dc87da8f8574/nbtools/jsobject/jsobject.py#L89-L104
def serialize(self, obj): """Serialize an object for sending to the front-end.""" if hasattr(obj, '_jsid'): return {'immutable': False, 'value': obj._jsid} else: obj_json = {'immutable': True} try: json.dumps(obj) obj_json['value'] = obj except: pass if callable(obj): guid = str(uuid.uuid4()) callback_registry[guid] = obj obj_json['callback'] = guid return obj_json
[ "def", "serialize", "(", "self", ",", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "'_jsid'", ")", ":", "return", "{", "'immutable'", ":", "False", ",", "'value'", ":", "obj", ".", "_jsid", "}", "else", ":", "obj_json", "=", "{", "'immutable'", ":", "True", "}", "try", ":", "json", ".", "dumps", "(", "obj", ")", "obj_json", "[", "'value'", "]", "=", "obj", "except", ":", "pass", "if", "callable", "(", "obj", ")", ":", "guid", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "callback_registry", "[", "guid", "]", "=", "obj", "obj_json", "[", "'callback'", "]", "=", "guid", "return", "obj_json" ]
Serialize an object for sending to the front-end.
[ "Serialize", "an", "object", "for", "sending", "to", "the", "front", "-", "end", "." ]
python
train
phaethon/kamene
kamene/utils.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/utils.py#L41-L48
def str2bytes(x): """Convert input argument to bytes""" if type(x) is bytes: return x elif type(x) is str: return bytes([ ord(i) for i in x ]) else: return str2bytes(str(x))
[ "def", "str2bytes", "(", "x", ")", ":", "if", "type", "(", "x", ")", "is", "bytes", ":", "return", "x", "elif", "type", "(", "x", ")", "is", "str", ":", "return", "bytes", "(", "[", "ord", "(", "i", ")", "for", "i", "in", "x", "]", ")", "else", ":", "return", "str2bytes", "(", "str", "(", "x", ")", ")" ]
Convert input argument to bytes
[ "Convert", "input", "argument", "to", "bytes" ]
python
train
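The conversions performed by str2bytes above, written out as assertions (assuming the function is importable from kamene.utils as the record's path suggests):

from kamene.utils import str2bytes

assert str2bytes(b'abc') == b'abc'   # bytes pass through unchanged
assert str2bytes('abc') == b'abc'    # str is converted character-by-character via ord()
assert str2bytes(123) == b'123'      # anything else is first turned into its str() form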
tcalmant/ipopo
pelix/ipopo/instance.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/instance.py#L184-L192
def bind(self, dependency, svc, svc_ref): # type: (Any, Any, ServiceReference) -> None """ Called by a dependency manager to inject a new service and update the component life cycle. """ with self._lock: self.__set_binding(dependency, svc, svc_ref) self.check_lifecycle()
[ "def", "bind", "(", "self", ",", "dependency", ",", "svc", ",", "svc_ref", ")", ":", "# type: (Any, Any, ServiceReference) -> None", "with", "self", ".", "_lock", ":", "self", ".", "__set_binding", "(", "dependency", ",", "svc", ",", "svc_ref", ")", "self", ".", "check_lifecycle", "(", ")" ]
Called by a dependency manager to inject a new service and update the component life cycle.
[ "Called", "by", "a", "dependency", "manager", "to", "inject", "a", "new", "service", "and", "update", "the", "component", "life", "cycle", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/__init__.py#L610-L631
def _set_ldp_rcv_label_silence_time(self, v, load=False): """ Setter method for ldp_rcv_label_silence_time, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_rcv_label_silence_time (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_rcv_label_silence_time is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_rcv_label_silence_time() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'100..60000']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1000), is_leaf=True, yang_name="ldp-rcv-label-silence-time", rest_name="rx-label-silence-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Receive label silence time (100-60000 ms)', u'cli-full-no': None, u'alt-name': u'rx-label-silence-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ldp_rcv_label_silence_time must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'100..60000']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1000), is_leaf=True, yang_name="ldp-rcv-label-silence-time", rest_name="rx-label-silence-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Receive label silence time (100-60000 ms)', u'cli-full-no': None, u'alt-name': u'rx-label-silence-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)""", }) self.__ldp_rcv_label_silence_time = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ldp_rcv_label_silence_time", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "u'100..60000'", "]", "}", ")", ",", "default", "=", "RestrictedClassType", "(", "base_type", "=", "long", ",", "restriction_dict", "=", "{", "'range'", ":", "[", "'0..4294967295'", "]", "}", ",", "int_size", "=", "32", ")", "(", "1000", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"ldp-rcv-label-silence-time\"", ",", "rest_name", "=", "\"rx-label-silence-timer\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-full-command'", ":", "None", ",", "u'info'", ":", "u'Receive label silence time (100-60000 ms)'", ",", "u'cli-full-no'", ":", "None", ",", "u'alt-name'", ":", "u'rx-label-silence-timer'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'uint32'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"ldp_rcv_label_silence_time must be of a type compatible with uint32\"\"\"", ",", "'defined-type'", ":", "\"uint32\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'100..60000']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1000), is_leaf=True, yang_name=\"ldp-rcv-label-silence-time\", rest_name=\"rx-label-silence-timer\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Receive label silence time (100-60000 ms)', u'cli-full-no': None, u'alt-name': u'rx-label-silence-timer'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__ldp_rcv_label_silence_time", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for ldp_rcv_label_silence_time, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/ldp/ldp_holder/ldp_rcv_label_silence_time (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_ldp_rcv_label_silence_time is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ldp_rcv_label_silence_time() directly.
[ "Setter", "method", "for", "ldp_rcv_label_silence_time", "mapped", "from", "YANG", "variable", "/", "mpls_config", "/", "router", "/", "mpls", "/", "mpls_cmds_holder", "/", "ldp", "/", "ldp_holder", "/", "ldp_rcv_label_silence_time", "(", "uint32", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_ldp_rcv_label_silence_time", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_ldp_rcv_label_silence_time", "()", "directly", "." ]
python
train
citronneur/rdpy
rdpy/protocol/rfb/rfb.py
https://github.com/citronneur/rdpy/blob/4109b7a6fe2abf3ddbaed54e29d2f31e63ed97f6/rdpy/protocol/rfb/rfb.py#L350-L356
def recvServerInit(self, data): """ Read server init packet @param data: Stream that contains well formed packet """ data.readType(self._serverInit) self.expectWithHeader(4, self.recvServerName)
[ "def", "recvServerInit", "(", "self", ",", "data", ")", ":", "data", ".", "readType", "(", "self", ".", "_serverInit", ")", "self", ".", "expectWithHeader", "(", "4", ",", "self", ".", "recvServerName", ")" ]
Read server init packet @param data: Stream that contains well formed packet
[ "Read", "server", "init", "packet" ]
python
train
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L1430-L1449
def krai_from_raw(self, amount): """ Divide a raw amount down by the krai ratio. :param amount: Amount in raw to convert to krai :type amount: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.krai_from_raw(amount=1000000000000000000000000000) 1 """ amount = self._process_value(amount, 'int') payload = {"amount": amount} resp = self.call('krai_from_raw', payload) return int(resp['amount'])
[ "def", "krai_from_raw", "(", "self", ",", "amount", ")", ":", "amount", "=", "self", ".", "_process_value", "(", "amount", ",", "'int'", ")", "payload", "=", "{", "\"amount\"", ":", "amount", "}", "resp", "=", "self", ".", "call", "(", "'krai_from_raw'", ",", "payload", ")", "return", "int", "(", "resp", "[", "'amount'", "]", ")" ]
Divide a raw amount down by the krai ratio. :param amount: Amount in raw to convert to krai :type amount: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.krai_from_raw(amount=1000000000000000000000000000) 1
[ "Divide", "a", "raw", "amount", "down", "by", "the", "krai", "ratio", "." ]
python
train
openai/baselines
baselines/common/input.py
https://github.com/openai/baselines/blob/3301089b48c42b87b396e246ea3f56fa4bfc9678/baselines/common/input.py#L5-L31
def observation_placeholder(ob_space, batch_size=None, name='Ob'): ''' Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor ''' assert isinstance(ob_space, Discrete) or isinstance(ob_space, Box) or isinstance(ob_space, MultiDiscrete), \ 'Can only deal with Discrete and Box observation spaces for now' dtype = ob_space.dtype if dtype == np.int8: dtype = np.uint8 return tf.placeholder(shape=(batch_size,) + ob_space.shape, dtype=dtype, name=name)
[ "def", "observation_placeholder", "(", "ob_space", ",", "batch_size", "=", "None", ",", "name", "=", "'Ob'", ")", ":", "assert", "isinstance", "(", "ob_space", ",", "Discrete", ")", "or", "isinstance", "(", "ob_space", ",", "Box", ")", "or", "isinstance", "(", "ob_space", ",", "MultiDiscrete", ")", ",", "'Can only deal with Discrete and Box observation spaces for now'", "dtype", "=", "ob_space", ".", "dtype", "if", "dtype", "==", "np", ".", "int8", ":", "dtype", "=", "np", ".", "uint8", "return", "tf", ".", "placeholder", "(", "shape", "=", "(", "batch_size", ",", ")", "+", "ob_space", ".", "shape", ",", "dtype", "=", "dtype", ",", "name", "=", "name", ")" ]
Create placeholder to feed observations into of the size appropriate to the observation space Parameters: ---------- ob_space: gym.Space observation space batch_size: int size of the batch to be fed into input. Can be left None in most cases. name: str name of the placeholder Returns: ------- tensorflow placeholder tensor
[ "Create", "placeholder", "to", "feed", "observations", "into", "of", "the", "size", "appropriate", "to", "the", "observation", "space" ]
python
valid
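A minimal sketch of the placeholder factory above, assuming TensorFlow 1.x graph mode and gym are installed; the Box space used here is an arbitrary example:

import numpy as np
from gym.spaces import Box
from baselines.common.input import observation_placeholder

ob_space = Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)
ob_ph = observation_placeholder(ob_space, batch_size=None)  # shape (None, 4), dtype float32
print(ob_ph.shape)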
profitbricks/profitbricks-sdk-python
examples/pb_deleteServer.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/examples/pb_deleteServer.py#L136-L173
def getServerStates(pbclient=None, dc_id=None, serverid=None, servername=None): ''' gets states of a server''' if pbclient is None: raise ValueError("argument 'pbclient' must not be None") if dc_id is None: raise ValueError("argument 'dc_id' must not be None") server = None if serverid is None: if servername is None: raise ValueError("one of 'serverid' or 'servername' must be specified") # so, arg.servername is set (to whatever) server_info = select_where(getServerInfo(pbclient, dc_id), ['id', 'name', 'state', 'vmstate'], name=servername) if len(server_info) > 1: raise NameError("ambiguous server name '{}'".format(servername)) if len(server_info) == 1: server = server_info[0] else: # get by ID may also fail if it's removed # in this case, catch exception (message 404) and be quiet for a while # unfortunately this has changed from Py2 to Py3 try: server_info = pbclient.get_server(dc_id, serverid, 1) server = dict(id=server_info['id'], name=server_info['properties']['name'], state=server_info['metadata']['state'], vmstate=server_info['properties']['vmState']) except Exception: ex = sys.exc_info()[1] if ex.args[0] is not None and ex.args[0] == 404: print("Server w/ ID {} not found".format(serverid)) server = None else: raise ex # end try/except # end if/else(serverid) return server
[ "def", "getServerStates", "(", "pbclient", "=", "None", ",", "dc_id", "=", "None", ",", "serverid", "=", "None", ",", "servername", "=", "None", ")", ":", "if", "pbclient", "is", "None", ":", "raise", "ValueError", "(", "\"argument 'pbclient' must not be None\"", ")", "if", "dc_id", "is", "None", ":", "raise", "ValueError", "(", "\"argument 'dc_id' must not be None\"", ")", "server", "=", "None", "if", "serverid", "is", "None", ":", "if", "servername", "is", "None", ":", "raise", "ValueError", "(", "\"one of 'serverid' or 'servername' must be specified\"", ")", "# so, arg.servername is set (to whatever)", "server_info", "=", "select_where", "(", "getServerInfo", "(", "pbclient", ",", "dc_id", ")", ",", "[", "'id'", ",", "'name'", ",", "'state'", ",", "'vmstate'", "]", ",", "name", "=", "servername", ")", "if", "len", "(", "server_info", ")", ">", "1", ":", "raise", "NameError", "(", "\"ambiguous server name '{}'\"", ".", "format", "(", "servername", ")", ")", "if", "len", "(", "server_info", ")", "==", "1", ":", "server", "=", "server_info", "[", "0", "]", "else", ":", "# get by ID may also fail if it's removed", "# in this case, catch exception (message 404) and be quiet for a while", "# unfortunately this has changed from Py2 to Py3", "try", ":", "server_info", "=", "pbclient", ".", "get_server", "(", "dc_id", ",", "serverid", ",", "1", ")", "server", "=", "dict", "(", "id", "=", "server_info", "[", "'id'", "]", ",", "name", "=", "server_info", "[", "'properties'", "]", "[", "'name'", "]", ",", "state", "=", "server_info", "[", "'metadata'", "]", "[", "'state'", "]", ",", "vmstate", "=", "server_info", "[", "'properties'", "]", "[", "'vmState'", "]", ")", "except", "Exception", ":", "ex", "=", "sys", ".", "exc_info", "(", ")", "[", "1", "]", "if", "ex", ".", "args", "[", "0", "]", "is", "not", "None", "and", "ex", ".", "args", "[", "0", "]", "==", "404", ":", "print", "(", "\"Server w/ ID {} not found\"", ".", "format", "(", "serverid", ")", ")", "server", "=", "None", "else", ":", "raise", "ex", "# end try/except", "# end if/else(serverid)", "return", "server" ]
gets states of a server
[ "gets", "states", "of", "a", "server" ]
python
valid
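A hedged usage sketch for the getServerStates example above; the ProfitBricksService credentials, the data-center UUID and the server name are placeholders, and the function itself lives in the example script rather than the installed package.

from profitbricks.client import ProfitBricksService

pbclient = ProfitBricksService(username='user@example.com', password='secret')  # hypothetical credentials
server = getServerStates(pbclient,
                         dc_id='11111111-2222-3333-4444-555555555555',   # hypothetical data center ID
                         servername='webserver-01')
if server is None:
    print("server not found")
else:
    print(server['state'], server['vmstate'])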
NeuroanatomyAndConnectivity/surfdist
nipype/surfdist_nipype.py
https://github.com/NeuroanatomyAndConnectivity/surfdist/blob/849fdfbb2822ff1aa530a3b0bc955a4312e3edf1/nipype/surfdist_nipype.py#L20-L56
def calc_surfdist(surface, labels, annot, reg, origin, target): import nibabel as nib import numpy as np import os from surfdist import load, utils, surfdist import csv """ inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4) """ # Load stuff surf = nib.freesurfer.read_geometry(surface) cort = np.sort(nib.freesurfer.read_label(labels)) src = load.load_freesurfer_label(annot, origin, cort) # Calculate distances dist = surfdist.dist_calc(surf, cort, src) # Project distances to target trg = nib.freesurfer.read_geometry(target)[0] native = nib.freesurfer.read_geometry(reg)[0] idx_trg_to_native = utils.find_node_match(trg, native)[0] # Get indices in trg space distt = dist[idx_trg_to_native] # Write to file and return file handle filename = os.path.join(os.getcwd(),'distances.csv') distt.tofile(filename,sep=",") return filename
[ "def", "calc_surfdist", "(", "surface", ",", "labels", ",", "annot", ",", "reg", ",", "origin", ",", "target", ")", ":", "import", "nibabel", "as", "nib", "import", "numpy", "as", "np", "import", "os", "from", "surfdist", "import", "load", ",", "utils", ",", "surfdist", "import", "csv", "# Load stuff", "surf", "=", "nib", ".", "freesurfer", ".", "read_geometry", "(", "surface", ")", "cort", "=", "np", ".", "sort", "(", "nib", ".", "freesurfer", ".", "read_label", "(", "labels", ")", ")", "src", "=", "load", ".", "load_freesurfer_label", "(", "annot", ",", "origin", ",", "cort", ")", "# Calculate distances", "dist", "=", "surfdist", ".", "dist_calc", "(", "surf", ",", "cort", ",", "src", ")", "# Project distances to target", "trg", "=", "nib", ".", "freesurfer", ".", "read_geometry", "(", "target", ")", "[", "0", "]", "native", "=", "nib", ".", "freesurfer", ".", "read_geometry", "(", "reg", ")", "[", "0", "]", "idx_trg_to_native", "=", "utils", ".", "find_node_match", "(", "trg", ",", "native", ")", "[", "0", "]", "# Get indices in trg space ", "distt", "=", "dist", "[", "idx_trg_to_native", "]", "# Write to file and return file handle", "filename", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "'distances.csv'", ")", "distt", ".", "tofile", "(", "filename", ",", "sep", "=", "\",\"", ")", "return", "filename" ]
inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4)
[ "inputs", ":", "surface", "-", "surface", "file", "(", "e", ".", "g", ".", "lh", ".", "pial", "with", "full", "path", ")", "labels", "-", "label", "file", "(", "e", ".", "g", ".", "lh", ".", "cortex", ".", "label", "with", "full", "path", ")", "annot", "-", "annot", "file", "(", "e", ".", "g", ".", "lh", ".", "aparc", ".", "a2009s", ".", "annot", "with", "full", "path", ")", "reg", "-", "registration", "file", "(", "lh", ".", "sphere", ".", "reg", ")", "origin", "-", "the", "label", "from", "which", "we", "calculate", "distances", "target", "-", "target", "surface", "(", "e", ".", "g", ".", "fsaverage4", ")" ]
python
train
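A usage sketch for calc_surfdist above; every path and the label name are hypothetical FreeSurfer-style placeholders, and nibabel, numpy and surfdist must be installed for the function body to import.

csv_path = calc_surfdist(
    surface='/subjects/sub01/surf/lh.pial',
    labels='/subjects/sub01/label/lh.cortex.label',
    annot='/subjects/sub01/label/lh.aparc.a2009s.annot',
    reg='/subjects/sub01/surf/lh.sphere.reg',
    origin='S_central',                              # a label present in the a2009s annotation
    target='/subjects/fsaverage4/surf/lh.sphere')
print(csv_path)  # distances.csv written in the current working directory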
google/grr
grr/client/grr_response_client/comms.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/comms.py#L468-L478
def Wait(self): """Wait until the next action is needed.""" time.sleep(self.sleep_time - int(self.sleep_time)) # Split a long sleep interval into 1 second intervals so we can heartbeat. for _ in range(int(self.sleep_time)): time.sleep(1) # Back off slowly at first and fast if no answer. self.sleep_time = min(self.poll_max, max(self.poll_min, self.sleep_time) * self.poll_slew)
[ "def", "Wait", "(", "self", ")", ":", "time", ".", "sleep", "(", "self", ".", "sleep_time", "-", "int", "(", "self", ".", "sleep_time", ")", ")", "# Split a long sleep interval into 1 second intervals so we can heartbeat.", "for", "_", "in", "range", "(", "int", "(", "self", ".", "sleep_time", ")", ")", ":", "time", ".", "sleep", "(", "1", ")", "# Back off slowly at first and fast if no answer.", "self", ".", "sleep_time", "=", "min", "(", "self", ".", "poll_max", ",", "max", "(", "self", ".", "poll_min", ",", "self", ".", "sleep_time", ")", "*", "self", ".", "poll_slew", ")" ]
Wait until the next action is needed.
[ "Wait", "until", "the", "next", "action", "is", "needed", "." ]
python
train
inveniosoftware-attic/invenio-utils
invenio_utils/url.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/url.py#L291-L320
def create_html_link(urlbase, urlargd, link_label, linkattrd=None, escape_urlargd=True, escape_linkattrd=True, urlhash=None): """Creates a W3C compliant link. @param urlbase: base url (e.g. config.CFG_SITE_URL/search) @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}) @param link_label: text displayed in a browser (has to be already escaped) @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'}) @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes &lt; or " becomes &quot;) @param escape_linkattrd: boolean indicating if the function should escape attributes (e.g. < becomes &lt; or " becomes &quot;) @param urlhash: hash string to add at the end of the link """ attributes_separator = ' ' output = '<a href="' + \ create_url(urlbase, urlargd, escape_urlargd, urlhash) + '"' if linkattrd: output += ' ' if escape_linkattrd: attributes = [escape(str(key), quote=True) + '="' + escape(str(linkattrd[key]), quote=True) + '"' for key in linkattrd.keys()] else: attributes = [str(key) + '="' + str(linkattrd[key]) + '"' for key in linkattrd.keys()] output += attributes_separator.join(attributes) output = wash_for_utf8(output) output += '>' + wash_for_utf8(link_label) + '</a>' return output
[ "def", "create_html_link", "(", "urlbase", ",", "urlargd", ",", "link_label", ",", "linkattrd", "=", "None", ",", "escape_urlargd", "=", "True", ",", "escape_linkattrd", "=", "True", ",", "urlhash", "=", "None", ")", ":", "attributes_separator", "=", "' '", "output", "=", "'<a href=\"'", "+", "create_url", "(", "urlbase", ",", "urlargd", ",", "escape_urlargd", ",", "urlhash", ")", "+", "'\"'", "if", "linkattrd", ":", "output", "+=", "' '", "if", "escape_linkattrd", ":", "attributes", "=", "[", "escape", "(", "str", "(", "key", ")", ",", "quote", "=", "True", ")", "+", "'=\"'", "+", "escape", "(", "str", "(", "linkattrd", "[", "key", "]", ")", ",", "quote", "=", "True", ")", "+", "'\"'", "for", "key", "in", "linkattrd", ".", "keys", "(", ")", "]", "else", ":", "attributes", "=", "[", "str", "(", "key", ")", "+", "'=\"'", "+", "str", "(", "linkattrd", "[", "key", "]", ")", "+", "'\"'", "for", "key", "in", "linkattrd", ".", "keys", "(", ")", "]", "output", "+=", "attributes_separator", ".", "join", "(", "attributes", ")", "output", "=", "wash_for_utf8", "(", "output", ")", "output", "+=", "'>'", "+", "wash_for_utf8", "(", "link_label", ")", "+", "'</a>'", "return", "output" ]
Creates a W3C compliant link. @param urlbase: base url (e.g. config.CFG_SITE_URL/search) @param urlargd: dictionary of parameters. (e.g. p={'recid':3, 'of'='hb'}) @param link_label: text displayed in a browser (has to be already escaped) @param linkattrd: dictionary of attributes (e.g. a={'class': 'img'}) @param escape_urlargd: boolean indicating if the function should escape arguments (e.g. < becomes &lt; or " becomes &quot;) @param escape_linkattrd: boolean indicating if the function should escape attributes (e.g. < becomes &lt; or " becomes &quot;) @param urlhash: hash string to add at the end of the link
[ "Creates", "a", "W3C", "compliant", "link", "." ]
python
train
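A small usage sketch for create_html_link above, mirroring the parameter examples given in its own docstring; the base URL is a placeholder.

link = create_html_link('http://example.org/search',
                        {'recid': 3, 'of': 'hb'},
                        'View record',
                        linkattrd={'class': 'img'})
# link is a single <a href="...">View record</a> string with escaped query arguments.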
dourvaris/nano-python
src/nano/rpc.py
https://github.com/dourvaris/nano-python/blob/f26b8bc895b997067780f925049a70e82c0c2479/src/nano/rpc.py#L1475-L1495
def rai_from_raw(self, amount): """ Divide a raw amount down by the rai ratio. :param amount: Amount in raw to convert to rai :type amount: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.rai_from_raw(amount=1000000000000000000000000) 1 """ amount = self._process_value(amount, 'int') payload = {"amount": amount} resp = self.call('rai_from_raw', payload) return int(resp['amount'])
[ "def", "rai_from_raw", "(", "self", ",", "amount", ")", ":", "amount", "=", "self", ".", "_process_value", "(", "amount", ",", "'int'", ")", "payload", "=", "{", "\"amount\"", ":", "amount", "}", "resp", "=", "self", ".", "call", "(", "'rai_from_raw'", ",", "payload", ")", "return", "int", "(", "resp", "[", "'amount'", "]", ")" ]
Divide a raw amount down by the rai ratio. :param amount: Amount in raw to convert to rai :type amount: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.rai_from_raw(amount=1000000000000000000000000) 1
[ "Divide", "a", "raw", "amount", "down", "by", "the", "rai", "ratio", "." ]
python
train
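Usage sketch for rai_from_raw above, taken from its own doctest; it assumes `rpc` is an already-constructed client instance pointing at a running node (the client class itself is not shown in this record).

# 10^24 raw corresponds to 1 rai, per the doctest above.
rai = rpc.rai_from_raw(amount=1000000000000000000000000)
assert rai == 1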
fukuball/fuku-ml
FukuML/Utility.py
https://github.com/fukuball/fuku-ml/blob/0da15ad7af76adf344b5a6b3f3dbabbbab3446b0/FukuML/Utility.py#L20-L40
def load(input_data_file='', data_type='float'): """load file""" X = [] Y = [] if data_type == 'float': with open(input_data_file) as f: for line in f: data = line.split() x = [1] + [float(v) for v in data[:-1]] X.append(x) Y.append(float(data[-1])) else: with open(input_data_file) as f: for line in f: data = line.split() x = [1] + [v for v in data[:-1]] X.append(x) Y.append(data[-1]) return np.array(X), np.array(Y)
[ "def", "load", "(", "input_data_file", "=", "''", ",", "data_type", "=", "'float'", ")", ":", "X", "=", "[", "]", "Y", "=", "[", "]", "if", "data_type", "==", "'float'", ":", "with", "open", "(", "input_data_file", ")", "as", "f", ":", "for", "line", "in", "f", ":", "data", "=", "line", ".", "split", "(", ")", "x", "=", "[", "1", "]", "+", "[", "float", "(", "v", ")", "for", "v", "in", "data", "[", ":", "-", "1", "]", "]", "X", ".", "append", "(", "x", ")", "Y", ".", "append", "(", "float", "(", "data", "[", "-", "1", "]", ")", ")", "else", ":", "with", "open", "(", "input_data_file", ")", "as", "f", ":", "for", "line", "in", "f", ":", "data", "=", "line", ".", "split", "(", ")", "x", "=", "[", "1", "]", "+", "[", "v", "for", "v", "in", "data", "[", ":", "-", "1", "]", "]", "X", ".", "append", "(", "x", ")", "Y", ".", "append", "(", "data", "[", "-", "1", "]", ")", "return", "np", ".", "array", "(", "X", ")", ",", "np", ".", "array", "(", "Y", ")" ]
load file
[ "load", "file" ]
python
test
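Usage sketch for the load helper above; the file names are placeholders for whitespace-separated data files whose last column is the target value.

X, Y = load('training_data.dat')                     # numeric features, parsed as floats
X_raw, Y_raw = load('labels.dat', data_type='str')   # any non-'float' value keeps strings
print(X.shape, Y.shape)                              # X gains a leading bias column of 1s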
plandes/actioncli
src/python/zensols/actioncli/persist.py
https://github.com/plandes/actioncli/blob/d1c4ea27e6f3394b30a1652ddd4b916160662773/src/python/zensols/actioncli/persist.py#L602-L610
def shelve(self): """Return an opened shelve object. """ logger.info('creating shelve data') fname = str(self.create_path.absolute()) inst = sh.open(fname, writeback=self.writeback) self.is_open = True return inst
[ "def", "shelve", "(", "self", ")", ":", "logger", ".", "info", "(", "'creating shelve data'", ")", "fname", "=", "str", "(", "self", ".", "create_path", ".", "absolute", "(", ")", ")", "inst", "=", "sh", ".", "open", "(", "fname", ",", "writeback", "=", "self", ".", "writeback", ")", "self", ".", "is_open", "=", "True", "return", "inst" ]
Return an opened shelve object.
[ "Return", "an", "opened", "shelve", "object", "." ]
python
train
BernardFW/bernard
src/bernard/platforms/telegram/platform.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/telegram/platform.py#L396-L433
async def self_check(cls): """ Check that the configuration is correct - Presence of "token" in the settings - Presence of "BERNARD_BASE_URL" in the global configuration """ # noinspection PyTypeChecker async for check in super(Telegram, cls).self_check(): yield check s = cls.settings() try: assert isinstance(s['token'], str) except (KeyError, TypeError, AssertionError): yield HealthCheckFail( '00005', 'Missing "token" for Telegram platform. You can obtain one by' 'registering your bot in Telegram.', ) if not hasattr(settings, 'BERNARD_BASE_URL'): yield HealthCheckFail( '00005', '"BERNARD_BASE_URL" cannot be found in the configuration. The' 'Telegram platform needs it because it uses it to ' 'automatically register its hook.' ) if not hasattr(settings, 'WEBVIEW_SECRET_KEY'): yield HealthCheckFail( '00005', '"WEBVIEW_SECRET_KEY" cannot be found in the configuration. ' 'It is required in order to be able to create secure postback ' 'URLs.' )
[ "async", "def", "self_check", "(", "cls", ")", ":", "# noinspection PyTypeChecker", "async", "for", "check", "in", "super", "(", "Telegram", ",", "cls", ")", ".", "self_check", "(", ")", ":", "yield", "check", "s", "=", "cls", ".", "settings", "(", ")", "try", ":", "assert", "isinstance", "(", "s", "[", "'token'", "]", ",", "str", ")", "except", "(", "KeyError", ",", "TypeError", ",", "AssertionError", ")", ":", "yield", "HealthCheckFail", "(", "'00005'", ",", "'Missing \"token\" for Telegram platform. You can obtain one by'", "'registering your bot in Telegram.'", ",", ")", "if", "not", "hasattr", "(", "settings", ",", "'BERNARD_BASE_URL'", ")", ":", "yield", "HealthCheckFail", "(", "'00005'", ",", "'\"BERNARD_BASE_URL\" cannot be found in the configuration. The'", "'Telegram platform needs it because it uses it to '", "'automatically register its hook.'", ")", "if", "not", "hasattr", "(", "settings", ",", "'WEBVIEW_SECRET_KEY'", ")", ":", "yield", "HealthCheckFail", "(", "'00005'", ",", "'\"WEBVIEW_SECRET_KEY\" cannot be found in the configuration. '", "'It is required in order to be able to create secure postback '", "'URLs.'", ")" ]
Check that the configuration is correct - Presence of "token" in the settings - Presence of "BERNARD_BASE_URL" in the global configuration
[ "Check", "that", "the", "configuration", "is", "correct" ]
python
train
pennersr/django-allauth
allauth/socialaccount/providers/bitbucket_oauth2/views.py
https://github.com/pennersr/django-allauth/blob/f70cb3d622f992f15fe9b57098e0b328445b664e/allauth/socialaccount/providers/bitbucket_oauth2/views.py#L29-L42
def get_email(self, token): """Fetches email address from email API endpoint""" resp = requests.get(self.emails_url, params={'access_token': token.token}) emails = resp.json().get('values', []) email = '' try: email = emails[0].get('email') primary_emails = [e for e in emails if e.get('is_primary', False)] email = primary_emails[0].get('email') except (IndexError, TypeError, KeyError): return '' finally: return email
[ "def", "get_email", "(", "self", ",", "token", ")", ":", "resp", "=", "requests", ".", "get", "(", "self", ".", "emails_url", ",", "params", "=", "{", "'access_token'", ":", "token", ".", "token", "}", ")", "emails", "=", "resp", ".", "json", "(", ")", ".", "get", "(", "'values'", ",", "[", "]", ")", "email", "=", "''", "try", ":", "email", "=", "emails", "[", "0", "]", ".", "get", "(", "'email'", ")", "primary_emails", "=", "[", "e", "for", "e", "in", "emails", "if", "e", ".", "get", "(", "'is_primary'", ",", "False", ")", "]", "email", "=", "primary_emails", "[", "0", "]", ".", "get", "(", "'email'", ")", "except", "(", "IndexError", ",", "TypeError", ",", "KeyError", ")", ":", "return", "''", "finally", ":", "return", "email" ]
Fetches email address from email API endpoint
[ "Fetches", "email", "address", "from", "email", "API", "endpoint" ]
python
train
flowersteam/explauto
explauto/sensorimotor_model/inverse/cma.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L4826-L4928
def _update(self, es): """Test termination criteria and update dictionary """ if es is None: es = self.es assert es is not None if es.countiter == 0: # in this case termination tests fail self.__init__() return self self.lastiter = es.countiter self.es = es self.clear() # compute conditions from scratch N = es.N opts = es.opts self.opts = opts # a hack to get _addstop going # fitness: generic criterion, user defined w/o default self._addstop('ftarget', es.best.f < opts['ftarget']) # maxiter, maxfevals: generic criteria self._addstop('maxfevals', es.countevals - 1 >= opts['maxfevals']) self._addstop('maxiter', ## meta_parameters.maxiter_multiplier == 1.0 es.countiter >= 1.0 * opts['maxiter']) # tolx, tolfacupx: generic criteria # tolfun, tolfunhist (CEC:tolfun includes hist) self._addstop('tolx', all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * es.pc]) and all([es.sigma * xi < opts['tolx'] for xi in es.sigma_vec * sqrt(es.dC)])) self._addstop('tolfacupx', any(es.sigma * es.sigma_vec * sqrt(es.dC) > es.sigma0 * es.sigma_vec0 * opts['tolfacupx'])) self._addstop('tolfun', es.fit.fit[-1] - es.fit.fit[0] < opts['tolfun'] and max(es.fit.hist) - min(es.fit.hist) < opts['tolfun']) self._addstop('tolfunhist', len(es.fit.hist) > 9 and max(es.fit.hist) - min(es.fit.hist) < opts['tolfunhist']) # worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \approx 50 * N**1.5 # but the median is not so much getting worse # / 5 reflects the sparsity of histbest/median # / 2 reflects the left and right part to be compared ## meta_parameters.tolstagnation_multiplier == 1.0 l = int(max(( 1.0 * opts['tolstagnation'] / 5. / 2, len(es.fit.histbest) / 10))) # TODO: why max(..., len(histbest)/10) ??? # TODO: the problem in the beginning is only with best ==> ??? # equality should handle flat fitness self._addstop('tolstagnation', # leads sometimes early stop on ftablet, fcigtab, N>=50? 
1 < 3 and opts['tolstagnation'] and es.countiter > N * (5 + 100 / es.popsize) and len(es.fit.histbest) > 100 and 2 * l < len(es.fit.histbest) and np.median(es.fit.histmedian[:l]) >= np.median(es.fit.histmedian[l:2 * l]) and np.median(es.fit.histbest[:l]) >= np.median(es.fit.histbest[l:2 * l])) # iiinteger: stagnation termination can prevent to find the optimum self._addstop('tolupsigma', opts['tolupsigma'] and es.sigma / np.max(es.D) > es.sigma0 * opts['tolupsigma']) if 1 < 3: # non-user defined, method specific # noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov idx = np.where(es.mean == es.mean + 0.2 * es.sigma * es.sigma_vec * es.dC**0.5)[0] self._addstop('noeffectcoord', any(idx), idx) # any([es.mean[i] == es.mean[i] + 0.2 * es.sigma * # (es.sigma_vec if isscalar(es.sigma_vec) else es.sigma_vec[i]) * # sqrt(es.dC[i]) # for i in xrange(N)]) # ) if opts['CMA_diagonal'] is not True and es.countiter > opts['CMA_diagonal']: i = es.countiter % N self._addstop('noeffectaxis', sum(es.mean == es.mean + 0.1 * es.sigma * es.D[i] * es.B[:, i]) == N) self._addstop('conditioncov', es.D[-1] > 1e7 * es.D[0], 1e14) # TODO self._addstop('callback', es.callbackstop) # termination_callback try: with open(self.opts['signals_filename'], 'r') as f: for line in f.readlines(): words = line.split() if len(words) < 2 or words[0].startswith(('#', '%')): continue if words[0] == 'stop' and words[1] == 'now': if len(words) > 2 and not words[2].startswith( self.opts['verb_filenameprefix']): continue self._addstop('file_signal', True, "stop now") break except IOError: pass if len(self): self._addstop('flat fitness: please (re)consider how to compute the fitness more elaborate', len(es.fit.hist) > 9 and max(es.fit.hist) == min(es.fit.hist)) return self
[ "def", "_update", "(", "self", ",", "es", ")", ":", "if", "es", "is", "None", ":", "es", "=", "self", ".", "es", "assert", "es", "is", "not", "None", "if", "es", ".", "countiter", "==", "0", ":", "# in this case termination tests fail", "self", ".", "__init__", "(", ")", "return", "self", "self", ".", "lastiter", "=", "es", ".", "countiter", "self", ".", "es", "=", "es", "self", ".", "clear", "(", ")", "# compute conditions from scratch", "N", "=", "es", ".", "N", "opts", "=", "es", ".", "opts", "self", ".", "opts", "=", "opts", "# a hack to get _addstop going", "# fitness: generic criterion, user defined w/o default", "self", ".", "_addstop", "(", "'ftarget'", ",", "es", ".", "best", ".", "f", "<", "opts", "[", "'ftarget'", "]", ")", "# maxiter, maxfevals: generic criteria", "self", ".", "_addstop", "(", "'maxfevals'", ",", "es", ".", "countevals", "-", "1", ">=", "opts", "[", "'maxfevals'", "]", ")", "self", ".", "_addstop", "(", "'maxiter'", ",", "## meta_parameters.maxiter_multiplier == 1.0", "es", ".", "countiter", ">=", "1.0", "*", "opts", "[", "'maxiter'", "]", ")", "# tolx, tolfacupx: generic criteria", "# tolfun, tolfunhist (CEC:tolfun includes hist)", "self", ".", "_addstop", "(", "'tolx'", ",", "all", "(", "[", "es", ".", "sigma", "*", "xi", "<", "opts", "[", "'tolx'", "]", "for", "xi", "in", "es", ".", "sigma_vec", "*", "es", ".", "pc", "]", ")", "and", "all", "(", "[", "es", ".", "sigma", "*", "xi", "<", "opts", "[", "'tolx'", "]", "for", "xi", "in", "es", ".", "sigma_vec", "*", "sqrt", "(", "es", ".", "dC", ")", "]", ")", ")", "self", ".", "_addstop", "(", "'tolfacupx'", ",", "any", "(", "es", ".", "sigma", "*", "es", ".", "sigma_vec", "*", "sqrt", "(", "es", ".", "dC", ")", ">", "es", ".", "sigma0", "*", "es", ".", "sigma_vec0", "*", "opts", "[", "'tolfacupx'", "]", ")", ")", "self", ".", "_addstop", "(", "'tolfun'", ",", "es", ".", "fit", ".", "fit", "[", "-", "1", "]", "-", "es", ".", "fit", ".", "fit", "[", "0", "]", "<", "opts", "[", "'tolfun'", "]", "and", "max", "(", "es", ".", "fit", ".", "hist", ")", "-", "min", "(", "es", ".", "fit", ".", "hist", ")", "<", "opts", "[", "'tolfun'", "]", ")", "self", ".", "_addstop", "(", "'tolfunhist'", ",", "len", "(", "es", ".", "fit", ".", "hist", ")", ">", "9", "and", "max", "(", "es", ".", "fit", ".", "hist", ")", "-", "min", "(", "es", ".", "fit", ".", "hist", ")", "<", "opts", "[", "'tolfunhist'", "]", ")", "# worst seen false positive: table N=80,lam=80, getting worse for fevals=35e3 \\approx 50 * N**1.5", "# but the median is not so much getting worse", "# / 5 reflects the sparsity of histbest/median", "# / 2 reflects the left and right part to be compared", "## meta_parameters.tolstagnation_multiplier == 1.0", "l", "=", "int", "(", "max", "(", "(", "1.0", "*", "opts", "[", "'tolstagnation'", "]", "/", "5.", "/", "2", ",", "len", "(", "es", ".", "fit", ".", "histbest", ")", "/", "10", ")", ")", ")", "# TODO: why max(..., len(histbest)/10) ???", "# TODO: the problem in the beginning is only with best ==> ???", "# equality should handle flat fitness", "self", ".", "_addstop", "(", "'tolstagnation'", ",", "# leads sometimes early stop on ftablet, fcigtab, N>=50?", "1", "<", "3", "and", "opts", "[", "'tolstagnation'", "]", "and", "es", ".", "countiter", ">", "N", "*", "(", "5", "+", "100", "/", "es", ".", "popsize", ")", "and", "len", "(", "es", ".", "fit", ".", "histbest", ")", ">", "100", "and", "2", "*", "l", "<", "len", "(", "es", ".", "fit", ".", "histbest", ")", "and", "np", ".", "median", "(", "es", ".", "fit", ".", 
"histmedian", "[", ":", "l", "]", ")", ">=", "np", ".", "median", "(", "es", ".", "fit", ".", "histmedian", "[", "l", ":", "2", "*", "l", "]", ")", "and", "np", ".", "median", "(", "es", ".", "fit", ".", "histbest", "[", ":", "l", "]", ")", ">=", "np", ".", "median", "(", "es", ".", "fit", ".", "histbest", "[", "l", ":", "2", "*", "l", "]", ")", ")", "# iiinteger: stagnation termination can prevent to find the optimum", "self", ".", "_addstop", "(", "'tolupsigma'", ",", "opts", "[", "'tolupsigma'", "]", "and", "es", ".", "sigma", "/", "np", ".", "max", "(", "es", ".", "D", ")", ">", "es", ".", "sigma0", "*", "opts", "[", "'tolupsigma'", "]", ")", "if", "1", "<", "3", ":", "# non-user defined, method specific", "# noeffectaxis (CEC: 0.1sigma), noeffectcoord (CEC:0.2sigma), conditioncov", "idx", "=", "np", ".", "where", "(", "es", ".", "mean", "==", "es", ".", "mean", "+", "0.2", "*", "es", ".", "sigma", "*", "es", ".", "sigma_vec", "*", "es", ".", "dC", "**", "0.5", ")", "[", "0", "]", "self", ".", "_addstop", "(", "'noeffectcoord'", ",", "any", "(", "idx", ")", ",", "idx", ")", "# any([es.mean[i] == es.mean[i] + 0.2 * es.sigma *", "# (es.sigma_vec if isscalar(es.sigma_vec) else es.sigma_vec[i]) *", "# sqrt(es.dC[i])", "# for i in xrange(N)])", "# )", "if", "opts", "[", "'CMA_diagonal'", "]", "is", "not", "True", "and", "es", ".", "countiter", ">", "opts", "[", "'CMA_diagonal'", "]", ":", "i", "=", "es", ".", "countiter", "%", "N", "self", ".", "_addstop", "(", "'noeffectaxis'", ",", "sum", "(", "es", ".", "mean", "==", "es", ".", "mean", "+", "0.1", "*", "es", ".", "sigma", "*", "es", ".", "D", "[", "i", "]", "*", "es", ".", "B", "[", ":", ",", "i", "]", ")", "==", "N", ")", "self", ".", "_addstop", "(", "'conditioncov'", ",", "es", ".", "D", "[", "-", "1", "]", ">", "1e7", "*", "es", ".", "D", "[", "0", "]", ",", "1e14", ")", "# TODO", "self", ".", "_addstop", "(", "'callback'", ",", "es", ".", "callbackstop", ")", "# termination_callback", "try", ":", "with", "open", "(", "self", ".", "opts", "[", "'signals_filename'", "]", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "words", "=", "line", ".", "split", "(", ")", "if", "len", "(", "words", ")", "<", "2", "or", "words", "[", "0", "]", ".", "startswith", "(", "(", "'#'", ",", "'%'", ")", ")", ":", "continue", "if", "words", "[", "0", "]", "==", "'stop'", "and", "words", "[", "1", "]", "==", "'now'", ":", "if", "len", "(", "words", ")", ">", "2", "and", "not", "words", "[", "2", "]", ".", "startswith", "(", "self", ".", "opts", "[", "'verb_filenameprefix'", "]", ")", ":", "continue", "self", ".", "_addstop", "(", "'file_signal'", ",", "True", ",", "\"stop now\"", ")", "break", "except", "IOError", ":", "pass", "if", "len", "(", "self", ")", ":", "self", ".", "_addstop", "(", "'flat fitness: please (re)consider how to compute the fitness more elaborate'", ",", "len", "(", "es", ".", "fit", ".", "hist", ")", ">", "9", "and", "max", "(", "es", ".", "fit", ".", "hist", ")", "==", "min", "(", "es", ".", "fit", ".", "hist", ")", ")", "return", "self" ]
Test termination criteria and update dictionary
[ "Test", "termination", "criteria", "and", "update", "dictionary" ]
python
train
getpelican/pelican-plugins
yuicompressor/yuicompressor.py
https://github.com/getpelican/pelican-plugins/blob/cfc7a3f224f1743063b034561f89a6a712d13587/yuicompressor/yuicompressor.py#L15-L26
def minify(pelican): """ Minify CSS and JS with YUI Compressor :param pelican: The Pelican instance """ executable = pelican.settings.get('YUICOMPRESSOR_EXECUTABLE', 'yuicompressor') for dirpath, _, filenames in os.walk(pelican.settings['OUTPUT_PATH']): for name in filenames: if os.path.splitext(name)[1] in ('.css','.js'): filepath = os.path.join(dirpath, name) logger.info('minify %s', filepath) check_call([executable, '--charset', 'utf-8', filepath, '-o', filepath])
[ "def", "minify", "(", "pelican", ")", ":", "executable", "=", "pelican", ".", "settings", ".", "get", "(", "'YUICOMPRESSOR_EXECUTABLE'", ",", "'yuicompressor'", ")", "for", "dirpath", ",", "_", ",", "filenames", "in", "os", ".", "walk", "(", "pelican", ".", "settings", "[", "'OUTPUT_PATH'", "]", ")", ":", "for", "name", "in", "filenames", ":", "if", "os", ".", "path", ".", "splitext", "(", "name", ")", "[", "1", "]", "in", "(", "'.css'", ",", "'.js'", ")", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "name", ")", "logger", ".", "info", "(", "'minify %s'", ",", "filepath", ")", "check_call", "(", "[", "executable", ",", "'--charset'", ",", "'utf-8'", ",", "filepath", ",", "'-o'", ",", "filepath", "]", ")" ]
Minify CSS and JS with YUI Compressor :param pelican: The Pelican instance
[ "Minify", "CSS", "and", "JS", "with", "YUI", "Compressor", ":", "param", "pelican", ":", "The", "Pelican", "instance" ]
python
train
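The minify receiver above is meant to be connected to a Pelican signal; below is a sketch of how a plugin typically wires it up (the register hook itself is not part of this record).

from pelican import signals

def register():
    # Run the YUI Compressor pass after Pelican has finished writing the site.
    signals.finalized.connect(minify)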
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L60-L89
def endpoint_id_arg(*args, **kwargs): """ This is the `ENDPOINT_ID` argument consumed by many Transfer endpoint related operations. It accepts alternate metavars for cases when another name is desirable (e.x. `SHARE_ID`, `HOST_ENDPOINT_ID`), but can also be applied as a direct decorator if no specialized metavar is being passed. Usage: >>> @endpoint_id_arg >>> def command_func(endpoint_id): >>> ... or >>> @endpoint_id_arg(metavar='HOST_ENDPOINT_ID') >>> def command_func(endpoint_id): >>> ... """ def decorate(f, **kwargs): """ Work of actually decorating a function -- wrapped in here because we want to dispatch depending on how this is invoked """ metavar = kwargs.get("metavar", "ENDPOINT_ID") f = click.argument("endpoint_id", metavar=metavar, type=click.UUID)(f) return f return detect_and_decorate(decorate, args, kwargs)
[ "def", "endpoint_id_arg", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "decorate", "(", "f", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Work of actually decorating a function -- wrapped in here because we\n want to dispatch depending on how this is invoked\n \"\"\"", "metavar", "=", "kwargs", ".", "get", "(", "\"metavar\"", ",", "\"ENDPOINT_ID\"", ")", "f", "=", "click", ".", "argument", "(", "\"endpoint_id\"", ",", "metavar", "=", "metavar", ",", "type", "=", "click", ".", "UUID", ")", "(", "f", ")", "return", "f", "return", "detect_and_decorate", "(", "decorate", ",", "args", ",", "kwargs", ")" ]
This is the `ENDPOINT_ID` argument consumed by many Transfer endpoint related operations. It accepts alternate metavars for cases when another name is desirable (e.x. `SHARE_ID`, `HOST_ENDPOINT_ID`), but can also be applied as a direct decorator if no specialized metavar is being passed. Usage: >>> @endpoint_id_arg >>> def command_func(endpoint_id): >>> ... or >>> @endpoint_id_arg(metavar='HOST_ENDPOINT_ID') >>> def command_func(endpoint_id): >>> ...
[ "This", "is", "the", "ENDPOINT_ID", "argument", "consumed", "by", "many", "Transfer", "endpoint", "related", "operations", ".", "It", "accepts", "alternate", "metavars", "for", "cases", "when", "another", "name", "is", "desirable", "(", "e", ".", "x", ".", "SHARE_ID", "HOST_ENDPOINT_ID", ")", "but", "can", "also", "be", "applied", "as", "a", "direct", "decorator", "if", "no", "specialized", "metavar", "is", "being", "passed", "." ]
python
train
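A fuller sketch of the usage shown in the docstring above, embedding the decorator in a click command; the command body is a made-up example.

import click
from globus_cli.parsing.shared_options import endpoint_id_arg

@click.command()
@endpoint_id_arg(metavar='HOST_ENDPOINT_ID')
def show_endpoint(endpoint_id):
    # endpoint_id arrives as a uuid.UUID because the argument uses type=click.UUID
    click.echo(str(endpoint_id))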
PyCQA/pylint-django
pylint_django/augmentations/__init__.py
https://github.com/PyCQA/pylint-django/blob/0bbee433519f48134df4a797341c4196546a454e/pylint_django/augmentations/__init__.py#L469-L480
def is_model_mpttmeta_subclass(node): """Checks that node is derivative of MPTTMeta class.""" if node.name != 'MPTTMeta' or not isinstance(node.parent, ClassDef): return False parents = ('django.db.models.base.Model', '.Model', # for the transformed version used in this plugin 'django.forms.forms.Form', '.Form', 'django.forms.models.ModelForm', '.ModelForm') return node_is_subclass(node.parent, *parents)
[ "def", "is_model_mpttmeta_subclass", "(", "node", ")", ":", "if", "node", ".", "name", "!=", "'MPTTMeta'", "or", "not", "isinstance", "(", "node", ".", "parent", ",", "ClassDef", ")", ":", "return", "False", "parents", "=", "(", "'django.db.models.base.Model'", ",", "'.Model'", ",", "# for the transformed version used in this plugin", "'django.forms.forms.Form'", ",", "'.Form'", ",", "'django.forms.models.ModelForm'", ",", "'.ModelForm'", ")", "return", "node_is_subclass", "(", "node", ".", "parent", ",", "*", "parents", ")" ]
Checks that node is derivative of MPTTMeta class.
[ "Checks", "that", "node", "is", "derivative", "of", "MPTTMeta", "class", "." ]
python
train
Metatab/metatab
metatab/doc.py
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/doc.py#L466-L515
def load_terms(self, terms): """Create a builder from a sequence of terms, usually a TermInterpreter""" #if self.root and len(self.root.children) > 0: # raise MetatabError("Can't run after adding terms to document.") for t in terms: t.doc = self if t.term_is('root.root'): if not self.root: self.root = t self.add_section(t) continue if t.term_is('root.section'): self.add_section(t) elif t.parent_term_lc == 'root': self.add_term(t) else: # These terms aren't added to the doc because they are attached to a # parent term that is added to the doc. assert t.parent is not None try: dd = terms.declare_dict self.decl_terms.update(dd['terms']) self.decl_sections.update(dd['sections']) self.super_terms.update(terms.super_terms()) kf = lambda e: e[1] # Sort on the value self.derived_terms ={ k:set( e[0] for e in g) for k, g in groupby(sorted(self.super_terms.items(), key=kf), kf)} except AttributeError as e: pass try: self.errors = terms.errors_as_dict() except AttributeError: self.errors = {} return self
[ "def", "load_terms", "(", "self", ",", "terms", ")", ":", "#if self.root and len(self.root.children) > 0:", "# raise MetatabError(\"Can't run after adding terms to document.\")", "for", "t", "in", "terms", ":", "t", ".", "doc", "=", "self", "if", "t", ".", "term_is", "(", "'root.root'", ")", ":", "if", "not", "self", ".", "root", ":", "self", ".", "root", "=", "t", "self", ".", "add_section", "(", "t", ")", "continue", "if", "t", ".", "term_is", "(", "'root.section'", ")", ":", "self", ".", "add_section", "(", "t", ")", "elif", "t", ".", "parent_term_lc", "==", "'root'", ":", "self", ".", "add_term", "(", "t", ")", "else", ":", "# These terms aren't added to the doc because they are attached to a", "# parent term that is added to the doc.", "assert", "t", ".", "parent", "is", "not", "None", "try", ":", "dd", "=", "terms", ".", "declare_dict", "self", ".", "decl_terms", ".", "update", "(", "dd", "[", "'terms'", "]", ")", "self", ".", "decl_sections", ".", "update", "(", "dd", "[", "'sections'", "]", ")", "self", ".", "super_terms", ".", "update", "(", "terms", ".", "super_terms", "(", ")", ")", "kf", "=", "lambda", "e", ":", "e", "[", "1", "]", "# Sort on the value", "self", ".", "derived_terms", "=", "{", "k", ":", "set", "(", "e", "[", "0", "]", "for", "e", "in", "g", ")", "for", "k", ",", "g", "in", "groupby", "(", "sorted", "(", "self", ".", "super_terms", ".", "items", "(", ")", ",", "key", "=", "kf", ")", ",", "kf", ")", "}", "except", "AttributeError", "as", "e", ":", "pass", "try", ":", "self", ".", "errors", "=", "terms", ".", "errors_as_dict", "(", ")", "except", "AttributeError", ":", "self", ".", "errors", "=", "{", "}", "return", "self" ]
Create a builder from a sequence of terms, usually a TermInterpreter
[ "Create", "a", "builder", "from", "a", "sequence", "of", "terms", "usually", "a", "TermInterpreter" ]
python
train
spyder-ide/spyder
spyder/widgets/comboboxes.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/comboboxes.py#L59-L72
def keyPressEvent(self, event): """Qt Override. Handle key press events. """ if event.key() == Qt.Key_Return or event.key() == Qt.Key_Enter: if self.add_current_text_if_valid(): self.selected() self.hide_completer() elif event.key() == Qt.Key_Escape: self.set_current_text(self.selected_text) self.hide_completer() else: QComboBox.keyPressEvent(self, event)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "if", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Return", "or", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Enter", ":", "if", "self", ".", "add_current_text_if_valid", "(", ")", ":", "self", ".", "selected", "(", ")", "self", ".", "hide_completer", "(", ")", "elif", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Escape", ":", "self", ".", "set_current_text", "(", "self", ".", "selected_text", ")", "self", ".", "hide_completer", "(", ")", "else", ":", "QComboBox", ".", "keyPressEvent", "(", "self", ",", "event", ")" ]
Qt Override. Handle key press events.
[ "Qt", "Override", ".", "Handle", "key", "press", "events", "." ]
python
train
grst/geos
geos/server.py
https://github.com/grst/geos/blob/ea15abcc5d8f86c9051df55e489b7d941b51a638/geos/server.py#L96-L101
def kml_region(map_source, z, x, y): """KML region fetched by a Google Earth network link. """ map = app.config["mapsources"][map_source] kml_doc = KMLRegion(app.config["url_formatter"], map, app.config["LOG_TILES_PER_ROW"], z, x, y) return kml_response(kml_doc)
[ "def", "kml_region", "(", "map_source", ",", "z", ",", "x", ",", "y", ")", ":", "map", "=", "app", ".", "config", "[", "\"mapsources\"", "]", "[", "map_source", "]", "kml_doc", "=", "KMLRegion", "(", "app", ".", "config", "[", "\"url_formatter\"", "]", ",", "map", ",", "app", ".", "config", "[", "\"LOG_TILES_PER_ROW\"", "]", ",", "z", ",", "x", ",", "y", ")", "return", "kml_response", "(", "kml_doc", ")" ]
KML region fetched by a Google Earth network link.
[ "KML", "region", "fetched", "by", "a", "Google", "Earth", "network", "link", "." ]
python
train
ianepperson/telnetsrvlib
telnetsrv/evtlet.py
https://github.com/ianepperson/telnetsrvlib/blob/fac52a4a333c2d373d53d295a76a0bbd71e5d682/telnetsrv/evtlet.py#L16-L24
def setup(self): '''Called after instantiation''' TelnetHandlerBase.setup(self) # Spawn a greenlet to handle socket input self.greenlet_ic = eventlet.spawn(self.inputcooker) # Note that inputcooker exits on EOF # Sleep for 0.5 second to allow options negotiation eventlet.sleep(0.5)
[ "def", "setup", "(", "self", ")", ":", "TelnetHandlerBase", ".", "setup", "(", "self", ")", "# Spawn a greenlet to handle socket input", "self", ".", "greenlet_ic", "=", "eventlet", ".", "spawn", "(", "self", ".", "inputcooker", ")", "# Note that inputcooker exits on EOF", "# Sleep for 0.5 second to allow options negotiation", "eventlet", ".", "sleep", "(", "0.5", ")" ]
Called after instantiation
[ "Called", "after", "instantiation" ]
python
train
rosenbrockc/fortpy
fortpy/elements.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/elements.py#L507-L519
def customtype(self): """If this variable is a user-derivedy type, return the CustomType instance that is its kind. """ result = None if self.is_custom: #Look for the module that declares this variable's kind in its public list. self.dependency() if self._kind_module is not None: if self.kind.lower() in self._kind_module.types: result = self._kind_module.types[self.kind.lower()] return result
[ "def", "customtype", "(", "self", ")", ":", "result", "=", "None", "if", "self", ".", "is_custom", ":", "#Look for the module that declares this variable's kind in its public list.", "self", ".", "dependency", "(", ")", "if", "self", ".", "_kind_module", "is", "not", "None", ":", "if", "self", ".", "kind", ".", "lower", "(", ")", "in", "self", ".", "_kind_module", ".", "types", ":", "result", "=", "self", ".", "_kind_module", ".", "types", "[", "self", ".", "kind", ".", "lower", "(", ")", "]", "return", "result" ]
If this variable is a user-derivedy type, return the CustomType instance that is its kind.
[ "If", "this", "variable", "is", "a", "user", "-", "derivedy", "type", "return", "the", "CustomType", "instance", "that", "is", "its", "kind", "." ]
python
train
dschep/ntfy
ntfy/backends/pushbullet.py
https://github.com/dschep/ntfy/blob/ecfeee960af406a27ebb123495e0ec2733286889/ntfy/backends/pushbullet.py#L6-L39
def notify(title, message, access_token, device_iden=None, email=None, retcode=None): """ Required parameter: * ``access_token`` - Your Pushbullet access token, created at https://www.pushbullet.com/#settings/account Optional parameters: * ``device_iden`` - a device identifier, if omited, notification is sent to all devices * ``email`` - send notification to pushbullte user with the specified email or send an email if they aren't a pushullet user """ data = { 'type': 'note', 'title': title, 'body': message, } if device_iden is not None: data['device_iden'] = device_iden if email is not None: data['email'] = email headers = {'Access-Token': access_token, 'User-Agent': USER_AGENT} resp = requests.post( 'https://api.pushbullet.com/v2/pushes', data=data, headers=headers) resp.raise_for_status()
[ "def", "notify", "(", "title", ",", "message", ",", "access_token", ",", "device_iden", "=", "None", ",", "email", "=", "None", ",", "retcode", "=", "None", ")", ":", "data", "=", "{", "'type'", ":", "'note'", ",", "'title'", ":", "title", ",", "'body'", ":", "message", ",", "}", "if", "device_iden", "is", "not", "None", ":", "data", "[", "'device_iden'", "]", "=", "device_iden", "if", "email", "is", "not", "None", ":", "data", "[", "'email'", "]", "=", "email", "headers", "=", "{", "'Access-Token'", ":", "access_token", ",", "'User-Agent'", ":", "USER_AGENT", "}", "resp", "=", "requests", ".", "post", "(", "'https://api.pushbullet.com/v2/pushes'", ",", "data", "=", "data", ",", "headers", "=", "headers", ")", "resp", ".", "raise_for_status", "(", ")" ]
Required parameter: * ``access_token`` - Your Pushbullet access token, created at https://www.pushbullet.com/#settings/account Optional parameters: * ``device_iden`` - a device identifier, if omited, notification is sent to all devices * ``email`` - send notification to pushbullte user with the specified email or send an email if they aren't a pushullet user
[ "Required", "parameter", ":", "*", "access_token", "-", "Your", "Pushbullet", "access", "token", "created", "at", "https", ":", "//", "www", ".", "pushbullet", ".", "com", "/", "#settings", "/", "account" ]
python
train
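Usage sketch for the Pushbullet notify backend above; the access token and e-mail address are placeholders.

notify('Backup finished',
       'Nightly backup completed without errors.',
       access_token='o.AAAAAAAAAAAAAAAA',       # hypothetical Pushbullet access token
       email='someone@example.com')             # optional: target a specific user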
aparo/pyes
pyes/managers.py
https://github.com/aparo/pyes/blob/712eb6095961755067b2b5baa262008ade6584b3/pyes/managers.py#L167-L178
def create_index_if_missing(self, index, settings=None): """Creates an index if it doesn't already exist. If supplied, settings must be a dictionary. :param index: the name of the index :keyword settings: a settings object or a dict containing settings """ try: return self.create_index(index, settings) except IndexAlreadyExistsException as e: return e.result
[ "def", "create_index_if_missing", "(", "self", ",", "index", ",", "settings", "=", "None", ")", ":", "try", ":", "return", "self", ".", "create_index", "(", "index", ",", "settings", ")", "except", "IndexAlreadyExistsException", "as", "e", ":", "return", "e", ".", "result" ]
Creates an index if it doesn't already exist. If supplied, settings must be a dictionary. :param index: the name of the index :keyword settings: a settings object or a dict containing settings
[ "Creates", "an", "index", "if", "it", "doesn", "t", "already", "exist", "." ]
python
train
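A hedged usage sketch for create_index_if_missing above; the node address, index name and settings dict are placeholders, and it assumes the manager is reachable as `conn.indices` on a pyes ES connection.

from pyes import ES

conn = ES('127.0.0.1:9200')                     # hypothetical Elasticsearch node
conn.indices.create_index_if_missing('test-index',
                                     settings={'number_of_shards': 1})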
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L10323-L10343
def run_usb_device_filters(self, device): """Asks the server to run USB devices filters of the associated machine against the given USB device and tell if there is a match. Intended to be used only for remote USB devices. Local ones don't require to call this method (this is done implicitly by the Host and USBProxyService). in device of type :class:`IUSBDevice` out matched of type bool out masked_interfaces of type int """ if not isinstance(device, IUSBDevice): raise TypeError("device can only be an instance of type IUSBDevice") (matched, masked_interfaces) = self._call("runUSBDeviceFilters", in_p=[device]) return (matched, masked_interfaces)
[ "def", "run_usb_device_filters", "(", "self", ",", "device", ")", ":", "if", "not", "isinstance", "(", "device", ",", "IUSBDevice", ")", ":", "raise", "TypeError", "(", "\"device can only be an instance of type IUSBDevice\"", ")", "(", "matched", ",", "masked_interfaces", ")", "=", "self", ".", "_call", "(", "\"runUSBDeviceFilters\"", ",", "in_p", "=", "[", "device", "]", ")", "return", "(", "matched", ",", "masked_interfaces", ")" ]
Asks the server to run USB devices filters of the associated machine against the given USB device and tell if there is a match. Intended to be used only for remote USB devices. Local ones don't require to call this method (this is done implicitly by the Host and USBProxyService). in device of type :class:`IUSBDevice` out matched of type bool out masked_interfaces of type int
[ "Asks", "the", "server", "to", "run", "USB", "devices", "filters", "of", "the", "associated", "machine", "against", "the", "given", "USB", "device", "and", "tell", "if", "there", "is", "a", "match", ".", "Intended", "to", "be", "used", "only", "for", "remote", "USB", "devices", ".", "Local", "ones", "don", "t", "require", "to", "call", "this", "method", "(", "this", "is", "done", "implicitly", "by", "the", "Host", "and", "USBProxyService", ")", "." ]
python
train
callowayproject/django-categories
categories/registration.py
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/registration.py#L22-L89
def register_model(self, app, model_name, field_type, field_definitions): """ Process for Django 1.7 + app: app name/label model_name: name of the model field_definitions: a string, tuple or list of field configurations field_type: either 'ForeignKey' or 'ManyToManyField' """ from django.apps import apps import collections app_label = app if isinstance(field_definitions, str): field_definitions = [field_definitions] elif not isinstance(field_definitions, collections.Iterable): raise ImproperlyConfigured(_('Field configuration for %(app)s should be a string or iterable') % {'app': app}) if field_type not in ('ForeignKey', 'ManyToManyField'): raise ImproperlyConfigured(_('`field_type` must be either `"ForeignKey"` or `"ManyToManyField"`.')) try: if not hasattr(model_name, "_meta"): app_config = apps.get_app_config(app) app_label = app_config.label model = app_config.get_model(model_name) else: model = model_name model_name = model._meta.model_name opts = model._meta if app_label not in self._model_registry: self._model_registry[app_label] = [] if model not in self._model_registry[app_label]: self._model_registry[app_label].append(model) except LookupError: raise ImproperlyConfigured('Model "%(model)s" doesn\'t exist in app "%(app)s".' % {'model': model_name, 'app': app}) if not isinstance(field_definitions, (tuple, list)): field_definitions = [field_definitions] for fld in field_definitions: extra_params = {'to': 'categories.Category', 'blank': True} if field_type != 'ManyToManyField': extra_params['on_delete'] = CASCADE extra_params['null'] = True if isinstance(fld, str): field_name = fld elif isinstance(fld, dict): if 'name' in fld: field_name = fld.pop('name') else: continue extra_params.update(fld) else: raise ImproperlyConfigured( _("%(settings)s doesn't recognize the value of %(app)s.%(model)s") % { 'settings': 'CATEGORY_SETTINGS', 'app': app, 'model': model_name}) registry_name = ".".join([app_label, model_name.lower(), field_name]) if registry_name in self._field_registry: continue try: opts.get_field(field_name) except FieldDoesNotExist: self._field_registry[registry_name] = FIELD_TYPES[field_type](**extra_params) self._field_registry[registry_name].contribute_to_class(model, field_name)
[ "def", "register_model", "(", "self", ",", "app", ",", "model_name", ",", "field_type", ",", "field_definitions", ")", ":", "from", "django", ".", "apps", "import", "apps", "import", "collections", "app_label", "=", "app", "if", "isinstance", "(", "field_definitions", ",", "str", ")", ":", "field_definitions", "=", "[", "field_definitions", "]", "elif", "not", "isinstance", "(", "field_definitions", ",", "collections", ".", "Iterable", ")", ":", "raise", "ImproperlyConfigured", "(", "_", "(", "'Field configuration for %(app)s should be a string or iterable'", ")", "%", "{", "'app'", ":", "app", "}", ")", "if", "field_type", "not", "in", "(", "'ForeignKey'", ",", "'ManyToManyField'", ")", ":", "raise", "ImproperlyConfigured", "(", "_", "(", "'`field_type` must be either `\"ForeignKey\"` or `\"ManyToManyField\"`.'", ")", ")", "try", ":", "if", "not", "hasattr", "(", "model_name", ",", "\"_meta\"", ")", ":", "app_config", "=", "apps", ".", "get_app_config", "(", "app", ")", "app_label", "=", "app_config", ".", "label", "model", "=", "app_config", ".", "get_model", "(", "model_name", ")", "else", ":", "model", "=", "model_name", "model_name", "=", "model", ".", "_meta", ".", "model_name", "opts", "=", "model", ".", "_meta", "if", "app_label", "not", "in", "self", ".", "_model_registry", ":", "self", ".", "_model_registry", "[", "app_label", "]", "=", "[", "]", "if", "model", "not", "in", "self", ".", "_model_registry", "[", "app_label", "]", ":", "self", ".", "_model_registry", "[", "app_label", "]", ".", "append", "(", "model", ")", "except", "LookupError", ":", "raise", "ImproperlyConfigured", "(", "'Model \"%(model)s\" doesn\\'t exist in app \"%(app)s\".'", "%", "{", "'model'", ":", "model_name", ",", "'app'", ":", "app", "}", ")", "if", "not", "isinstance", "(", "field_definitions", ",", "(", "tuple", ",", "list", ")", ")", ":", "field_definitions", "=", "[", "field_definitions", "]", "for", "fld", "in", "field_definitions", ":", "extra_params", "=", "{", "'to'", ":", "'categories.Category'", ",", "'blank'", ":", "True", "}", "if", "field_type", "!=", "'ManyToManyField'", ":", "extra_params", "[", "'on_delete'", "]", "=", "CASCADE", "extra_params", "[", "'null'", "]", "=", "True", "if", "isinstance", "(", "fld", ",", "str", ")", ":", "field_name", "=", "fld", "elif", "isinstance", "(", "fld", ",", "dict", ")", ":", "if", "'name'", "in", "fld", ":", "field_name", "=", "fld", ".", "pop", "(", "'name'", ")", "else", ":", "continue", "extra_params", ".", "update", "(", "fld", ")", "else", ":", "raise", "ImproperlyConfigured", "(", "_", "(", "\"%(settings)s doesn't recognize the value of %(app)s.%(model)s\"", ")", "%", "{", "'settings'", ":", "'CATEGORY_SETTINGS'", ",", "'app'", ":", "app", ",", "'model'", ":", "model_name", "}", ")", "registry_name", "=", "\".\"", ".", "join", "(", "[", "app_label", ",", "model_name", ".", "lower", "(", ")", ",", "field_name", "]", ")", "if", "registry_name", "in", "self", ".", "_field_registry", ":", "continue", "try", ":", "opts", ".", "get_field", "(", "field_name", ")", "except", "FieldDoesNotExist", ":", "self", ".", "_field_registry", "[", "registry_name", "]", "=", "FIELD_TYPES", "[", "field_type", "]", "(", "*", "*", "extra_params", ")", "self", ".", "_field_registry", "[", "registry_name", "]", ".", "contribute_to_class", "(", "model", ",", "field_name", ")" ]
Process for Django 1.7 + app: app name/label model_name: name of the model field_definitions: a string, tuple or list of field configurations field_type: either 'ForeignKey' or 'ManyToManyField'
[ "Process", "for", "Django", "1", ".", "7", "+", "app", ":", "app", "name", "/", "label", "model_name", ":", "name", "of", "the", "model", "field_definitions", ":", "a", "string", "tuple", "or", "list", "of", "field", "configurations", "field_type", ":", "either", "ForeignKey", "or", "ManyToManyField" ]
python
train
Alignak-monitoring/alignak
alignak/stats.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/stats.py#L308-L362
def flush(self, log=False): """Send inner stored metrics to the defined Graphite Returns False if the sending failed with a warning log if log parameter is set :return: bool """ if not self.my_metrics: logger.debug("Flushing - no metrics to send") return True now = int(time.time()) if self.last_failure and self.last_failure + self.metrics_flush_pause > now: if not self.log_metrics_flush_pause: date = datetime.datetime.fromtimestamp( self.last_failure).strftime(self.date_fmt) logger.warning("Metrics flush paused on connection error " "(last failed: %s). " "Inner stored metric: %d. Trying to send...", date, self.metrics_count) self.log_metrics_flush_pause = True return True try: logger.debug("Flushing %d metrics to Graphite/carbon", self.metrics_count) if self.carbon.send_data(): self.my_metrics = [] else: logger.warning("Failed sending metrics to Graphite/carbon. " "Inner stored metric: %d", self.metrics_count) if log: logger.warning("Failed sending metrics to Graphite/carbon. " "Inner stored metric: %d", self.metrics_count) return False if self.log_metrics_flush_pause: logger.warning("Metrics flush restored. " "Remaining stored metric: %d", self.metrics_count) self.last_failure = 0 self.log_metrics_flush_pause = False except Exception as exp: # pylint: disable=broad-except if not self.log_metrics_flush_pause: logger.warning("Failed sending metrics to Graphite/carbon. " "Inner stored metric: %d", self.metrics_count) else: date = datetime.datetime.fromtimestamp( self.last_failure).strftime(self.date_fmt) logger.warning("Metrics flush paused on connection error " "(last failed: %s). " "Inner stored metric: %d. Trying to send...", date, self.metrics_count) logger.warning("Exception: %s", str(exp)) self.last_failure = now return False return True
[ "def", "flush", "(", "self", ",", "log", "=", "False", ")", ":", "if", "not", "self", ".", "my_metrics", ":", "logger", ".", "debug", "(", "\"Flushing - no metrics to send\"", ")", "return", "True", "now", "=", "int", "(", "time", ".", "time", "(", ")", ")", "if", "self", ".", "last_failure", "and", "self", ".", "last_failure", "+", "self", ".", "metrics_flush_pause", ">", "now", ":", "if", "not", "self", ".", "log_metrics_flush_pause", ":", "date", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "self", ".", "last_failure", ")", ".", "strftime", "(", "self", ".", "date_fmt", ")", "logger", ".", "warning", "(", "\"Metrics flush paused on connection error \"", "\"(last failed: %s). \"", "\"Inner stored metric: %d. Trying to send...\"", ",", "date", ",", "self", ".", "metrics_count", ")", "self", ".", "log_metrics_flush_pause", "=", "True", "return", "True", "try", ":", "logger", ".", "debug", "(", "\"Flushing %d metrics to Graphite/carbon\"", ",", "self", ".", "metrics_count", ")", "if", "self", ".", "carbon", ".", "send_data", "(", ")", ":", "self", ".", "my_metrics", "=", "[", "]", "else", ":", "logger", ".", "warning", "(", "\"Failed sending metrics to Graphite/carbon. \"", "\"Inner stored metric: %d\"", ",", "self", ".", "metrics_count", ")", "if", "log", ":", "logger", ".", "warning", "(", "\"Failed sending metrics to Graphite/carbon. \"", "\"Inner stored metric: %d\"", ",", "self", ".", "metrics_count", ")", "return", "False", "if", "self", ".", "log_metrics_flush_pause", ":", "logger", ".", "warning", "(", "\"Metrics flush restored. \"", "\"Remaining stored metric: %d\"", ",", "self", ".", "metrics_count", ")", "self", ".", "last_failure", "=", "0", "self", ".", "log_metrics_flush_pause", "=", "False", "except", "Exception", "as", "exp", ":", "# pylint: disable=broad-except", "if", "not", "self", ".", "log_metrics_flush_pause", ":", "logger", ".", "warning", "(", "\"Failed sending metrics to Graphite/carbon. \"", "\"Inner stored metric: %d\"", ",", "self", ".", "metrics_count", ")", "else", ":", "date", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "self", ".", "last_failure", ")", ".", "strftime", "(", "self", ".", "date_fmt", ")", "logger", ".", "warning", "(", "\"Metrics flush paused on connection error \"", "\"(last failed: %s). \"", "\"Inner stored metric: %d. Trying to send...\"", ",", "date", ",", "self", ".", "metrics_count", ")", "logger", ".", "warning", "(", "\"Exception: %s\"", ",", "str", "(", "exp", ")", ")", "self", ".", "last_failure", "=", "now", "return", "False", "return", "True" ]
Send inner stored metrics to the defined Graphite

Returns False if the sending failed, with a warning log if the log parameter is set

:return: bool
[ "Send", "inner", "stored", "metrics", "to", "the", "defined", "Graphite" ]
python
train
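The flush method above follows a queue-and-back-off pattern: metrics stay stored while the carbon connection is failing, and a pause window keeps the log from filling with repeated warnings. Below is a minimal standalone sketch of that pattern; the MetricsSender class and its send() stub are illustrative stand-ins, not part of the module in this record.

import time

class MetricsSender(object):
    """Illustrative sender showing the queue-and-back-off idea."""

    def __init__(self, flush_pause=60):
        self.queue = []            # metrics waiting to be sent
        self.last_failure = 0      # timestamp of the last failed send
        self.flush_pause = flush_pause

    def send(self, metrics):
        # Stand-in for carbon.send_data(); pretend every send succeeds.
        return True

    def flush(self):
        if not self.queue:
            return True
        now = int(time.time())
        if self.last_failure and self.last_failure + self.flush_pause > now:
            return True            # still inside the pause window, keep queueing
        if self.send(self.queue):
            self.queue = []
            self.last_failure = 0
            return True
        self.last_failure = now    # remember the failure and back off
        return False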
jahuth/litus
spikes.py
https://github.com/jahuth/litus/blob/712b016ea2dbb1cf0a30bfdbb0a136945a7b7c5e/spikes.py#L302-L319
def space(self,bins=None,units=None,conversion_function=convert_time,resolution=None,end_at_end=True,scale=None): """ Computes adequat binning for the dimension (on the values). bins: number of bins or None units: str or None conversion_function: function to convert units to other units resolution: step size or None end_at_end: Boolean only if `unit == 1` whether or not the last point should be the last data point (True) or one after the last valid point (False) scale: 'lin','log' or None a spike container can also use 'unique', but not the LabelDimension itself! if the LabelDimension.scale is 'unique', .bins() will return a linear spacing """ if scale in ['log'] or (scale is None and self.scale in ['log']): return self.logspace(bins=bins,units=units,conversion_function=conversion_function,resolution=resolution,end_at_end=end_at_end) return self.linspace(bins=bins,units=units,conversion_function=conversion_function,resolution=resolution,end_at_end=end_at_end)
[ "def", "space", "(", "self", ",", "bins", "=", "None", ",", "units", "=", "None", ",", "conversion_function", "=", "convert_time", ",", "resolution", "=", "None", ",", "end_at_end", "=", "True", ",", "scale", "=", "None", ")", ":", "if", "scale", "in", "[", "'log'", "]", "or", "(", "scale", "is", "None", "and", "self", ".", "scale", "in", "[", "'log'", "]", ")", ":", "return", "self", ".", "logspace", "(", "bins", "=", "bins", ",", "units", "=", "units", ",", "conversion_function", "=", "conversion_function", ",", "resolution", "=", "resolution", ",", "end_at_end", "=", "end_at_end", ")", "return", "self", ".", "linspace", "(", "bins", "=", "bins", ",", "units", "=", "units", ",", "conversion_function", "=", "conversion_function", ",", "resolution", "=", "resolution", ",", "end_at_end", "=", "end_at_end", ")" ]
Computes adequate binning for the dimension (on the values).

bins: number of bins or None
units: str or None
conversion_function: function to convert units to other units
resolution: step size or None
end_at_end: Boolean
    only if `unit == 1` whether or not the last point should be the last data point (True) or one after the last valid point (False)
scale: 'lin','log' or None
    a spike container can also use 'unique', but not the LabelDimension itself!
    if the LabelDimension.scale is 'unique', .bins() will return a linear spacing
[ "Computes", "adequat", "binning", "for", "the", "dimension", "(", "on", "the", "values", ")", "." ]
python
train
klen/peewee_migrate
peewee_migrate/router.py
https://github.com/klen/peewee_migrate/blob/b77895ab1c9be3121bc127e0c2dfb047eed8b24c/peewee_migrate/router.py#L243-L248
def clear(self): """Remove migrations from fs.""" super(Router, self).clear() for name in self.todo: filename = os.path.join(self.migrate_dir, name + '.py') os.remove(filename)
[ "def", "clear", "(", "self", ")", ":", "super", "(", "Router", ",", "self", ")", ".", "clear", "(", ")", "for", "name", "in", "self", ".", "todo", ":", "filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "migrate_dir", ",", "name", "+", "'.py'", ")", "os", ".", "remove", "(", "filename", ")" ]
Remove migrations from fs.
[ "Remove", "migrations", "from", "fs", "." ]
python
train
knipknap/exscript
Exscript/protocols/telnetlib.py
https://github.com/knipknap/exscript/blob/72718eee3e87b345d5a5255be9824e867e42927b/Exscript/protocols/telnetlib.py#L282-L287
def close(self): """Close the connection.""" if self.sock: self.sock.close() self.sock = 0 self.eof = 1
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "sock", ":", "self", ".", "sock", ".", "close", "(", ")", "self", ".", "sock", "=", "0", "self", ".", "eof", "=", "1" ]
Close the connection.
[ "Close", "the", "connection", "." ]
python
train
hyperledger/sawtooth-core
rest_api/sawtooth_rest_api/route_handlers.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/rest_api/sawtooth_rest_api/route_handlers.py#L319-L349
async def list_blocks(self, request): """Fetches list of blocks from validator, optionally filtered by id. Request: query: - head: The id of the block to use as the head of the chain - id: Comma separated list of block ids to include in results Response: data: JSON array of fully expanded Block objects head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block paging: Paging info and nav, like total resources and a next link """ paging_controls = self._get_paging_controls(request) validator_query = client_block_pb2.ClientBlockListRequest( head_id=self._get_head_id(request), block_ids=self._get_filter_ids(request), sorting=self._get_sorting_message(request, "block_num"), paging=self._make_paging_message(paging_controls)) response = await self._query_validator( Message.CLIENT_BLOCK_LIST_REQUEST, client_block_pb2.ClientBlockListResponse, validator_query) return self._wrap_paginated_response( request=request, response=response, controls=paging_controls, data=[self._expand_block(b) for b in response['blocks']])
[ "async", "def", "list_blocks", "(", "self", ",", "request", ")", ":", "paging_controls", "=", "self", ".", "_get_paging_controls", "(", "request", ")", "validator_query", "=", "client_block_pb2", ".", "ClientBlockListRequest", "(", "head_id", "=", "self", ".", "_get_head_id", "(", "request", ")", ",", "block_ids", "=", "self", ".", "_get_filter_ids", "(", "request", ")", ",", "sorting", "=", "self", ".", "_get_sorting_message", "(", "request", ",", "\"block_num\"", ")", ",", "paging", "=", "self", ".", "_make_paging_message", "(", "paging_controls", ")", ")", "response", "=", "await", "self", ".", "_query_validator", "(", "Message", ".", "CLIENT_BLOCK_LIST_REQUEST", ",", "client_block_pb2", ".", "ClientBlockListResponse", ",", "validator_query", ")", "return", "self", ".", "_wrap_paginated_response", "(", "request", "=", "request", ",", "response", "=", "response", ",", "controls", "=", "paging_controls", ",", "data", "=", "[", "self", ".", "_expand_block", "(", "b", ")", "for", "b", "in", "response", "[", "'blocks'", "]", "]", ")" ]
Fetches list of blocks from validator, optionally filtered by id. Request: query: - head: The id of the block to use as the head of the chain - id: Comma separated list of block ids to include in results Response: data: JSON array of fully expanded Block objects head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block paging: Paging info and nav, like total resources and a next link
[ "Fetches", "list", "of", "blocks", "from", "validator", "optionally", "filtered", "by", "id", "." ]
python
train
gwastro/pycbc
pycbc/tmpltbank/option_utils.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/option_utils.py#L223-L233
def from_argparse(cls, opts): """ Initialize an instance of the metricParameters class from an argparse.OptionParser instance. This assumes that insert_metric_calculation_options and verify_metric_calculation_options have already been called before initializing the class. """ return cls(opts.pn_order, opts.f_low, opts.f_upper, opts.delta_f,\ f0=opts.f0, write_metric=opts.write_metric)
[ "def", "from_argparse", "(", "cls", ",", "opts", ")", ":", "return", "cls", "(", "opts", ".", "pn_order", ",", "opts", ".", "f_low", ",", "opts", ".", "f_upper", ",", "opts", ".", "delta_f", ",", "f0", "=", "opts", ".", "f0", ",", "write_metric", "=", "opts", ".", "write_metric", ")" ]
Initialize an instance of the metricParameters class from an argparse.OptionParser instance. This assumes that insert_metric_calculation_options and verify_metric_calculation_options have already been called before initializing the class.
[ "Initialize", "an", "instance", "of", "the", "metricParameters", "class", "from", "an", "argparse", ".", "OptionParser", "instance", ".", "This", "assumes", "that", "insert_metric_calculation_options", "and", "verify_metric_calculation_options", "have", "already", "been", "called", "before", "initializing", "the", "class", "." ]
python
train
openergy/oplus
oplus/util.py
https://github.com/openergy/oplus/blob/f095868d1990c1d126e906ada6acbab26348b3d3/oplus/util.py#L119-L153
def get_string_buffer(path_or_content, expected_extension): """ path_or_content: path or content_str or content_bts or string_io or bytes_io Returns ------- string_buffer, path path will be None if input was not a path """ buffer, path = None, None # path or content string if isinstance(path_or_content, str): if path_or_content[-len(expected_extension)-1:] == ".%s" % expected_extension: if not os.path.isfile(path_or_content): raise FileNotFoundError("No file at given path: '%s'." % path_or_content) buffer, path = open(path_or_content, encoding=CONF.encoding), path_or_content else: buffer = io.StringIO(path_or_content, ) # text io elif isinstance(path_or_content, io.TextIOBase): buffer = path_or_content # bytes elif isinstance(path_or_content, bytes): buffer = io.StringIO(path_or_content.decode(encoding=CONF.encoding)) elif isinstance(path_or_content, io.BufferedIOBase): buffer = io.StringIO(path_or_content.read().decode(encoding=CONF.encoding)) else: raise ValueError("path_or_content type could not be identified") return buffer, path
[ "def", "get_string_buffer", "(", "path_or_content", ",", "expected_extension", ")", ":", "buffer", ",", "path", "=", "None", ",", "None", "# path or content string", "if", "isinstance", "(", "path_or_content", ",", "str", ")", ":", "if", "path_or_content", "[", "-", "len", "(", "expected_extension", ")", "-", "1", ":", "]", "==", "\".%s\"", "%", "expected_extension", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "path_or_content", ")", ":", "raise", "FileNotFoundError", "(", "\"No file at given path: '%s'.\"", "%", "path_or_content", ")", "buffer", ",", "path", "=", "open", "(", "path_or_content", ",", "encoding", "=", "CONF", ".", "encoding", ")", ",", "path_or_content", "else", ":", "buffer", "=", "io", ".", "StringIO", "(", "path_or_content", ",", ")", "# text io", "elif", "isinstance", "(", "path_or_content", ",", "io", ".", "TextIOBase", ")", ":", "buffer", "=", "path_or_content", "# bytes", "elif", "isinstance", "(", "path_or_content", ",", "bytes", ")", ":", "buffer", "=", "io", ".", "StringIO", "(", "path_or_content", ".", "decode", "(", "encoding", "=", "CONF", ".", "encoding", ")", ")", "elif", "isinstance", "(", "path_or_content", ",", "io", ".", "BufferedIOBase", ")", ":", "buffer", "=", "io", ".", "StringIO", "(", "path_or_content", ".", "read", "(", ")", ".", "decode", "(", "encoding", "=", "CONF", ".", "encoding", ")", ")", "else", ":", "raise", "ValueError", "(", "\"path_or_content type could not be identified\"", ")", "return", "buffer", ",", "path" ]
path_or_content: path or content_str or content_bts or string_io or bytes_io Returns ------- string_buffer, path path will be None if input was not a path
[ "path_or_content", ":", "path", "or", "content_str", "or", "content_bts", "or", "string_io", "or", "bytes_io" ]
python
test
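A short usage sketch for get_string_buffer above. It accepts a path, a content string, bytes, or an open stream and normalizes them to a text buffer; the import path is inferred from the record's path field, and the "idf" extension here is only an example.

import io
from oplus.util import get_string_buffer   # import path assumed from oplus/util.py

# Raw content string: no file on disk, so a StringIO is returned and path is None.
buffer, path = get_string_buffer("Version, 9.1;", "idf")
print(path)            # None
print(buffer.read())   # "Version, 9.1;"

# An already-open binary stream is decoded and wrapped the same way.
buffer, path = get_string_buffer(io.BytesIO(b"Version, 9.1;"), "idf")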
kwikteam/phy
phy/plot/transform.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/transform.py#L300-L305
def remove(self, name): """Remove a transform in the chain.""" cpu_transforms = self._remove_transform(self.cpu_transforms, name) gpu_transforms = self._remove_transform(self.gpu_transforms, name) return (TransformChain().add_on_cpu(cpu_transforms). add_on_gpu(gpu_transforms))
[ "def", "remove", "(", "self", ",", "name", ")", ":", "cpu_transforms", "=", "self", ".", "_remove_transform", "(", "self", ".", "cpu_transforms", ",", "name", ")", "gpu_transforms", "=", "self", ".", "_remove_transform", "(", "self", ".", "gpu_transforms", ",", "name", ")", "return", "(", "TransformChain", "(", ")", ".", "add_on_cpu", "(", "cpu_transforms", ")", ".", "add_on_gpu", "(", "gpu_transforms", ")", ")" ]
Remove a transform in the chain.
[ "Remove", "a", "transform", "in", "the", "chain", "." ]
python
train
AndreLouisCaron/runwith
runwith/__init__.py
https://github.com/AndreLouisCaron/runwith/blob/cfa2b6ae67d73ec5b24f1502a37060d838276e8b/runwith/__init__.py#L51-L94
def main(argv=None): """Program entry point.""" # If we're invoked directly (runwith ...), we'll get None here. if argv is None: argv = sys.argv[1:] # Parse command-line arguments. argv = cli.parse_args(argv) # Translate CLI arguments to Popen options. options = {} for k in ('stdin', 'stdout', 'stderr', 'cwd'): v = getattr(argv, k, None) if v: options[k] = v # Start the child process. try: process = subprocess.Popen( argv.command, **options ) except OSError: print('Invalid command %r.' % argv.command) sys.exit(2) # Wait for the child process to complete. thread = threading.Thread(target=process.wait) thread.start() if argv.time_limit is None: thread.join() else: thread.join(argv.time_limit.total_seconds()) if thread.is_alive() and argv.grace_time: process.send_signal(SIGKILL) thread.join(argv.grace_time.total_seconds()) if thread.is_alive(): process.terminate() thread.join() status = process.returncode assert status is not None # Forward exit code. return status
[ "def", "main", "(", "argv", "=", "None", ")", ":", "# If we're invoked directly (runwith ...), we'll get None here.", "if", "argv", "is", "None", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "# Parse command-line arguments.", "argv", "=", "cli", ".", "parse_args", "(", "argv", ")", "# Translate CLI arguments to Popen options.", "options", "=", "{", "}", "for", "k", "in", "(", "'stdin'", ",", "'stdout'", ",", "'stderr'", ",", "'cwd'", ")", ":", "v", "=", "getattr", "(", "argv", ",", "k", ",", "None", ")", "if", "v", ":", "options", "[", "k", "]", "=", "v", "# Start the child process.", "try", ":", "process", "=", "subprocess", ".", "Popen", "(", "argv", ".", "command", ",", "*", "*", "options", ")", "except", "OSError", ":", "print", "(", "'Invalid command %r.'", "%", "argv", ".", "command", ")", "sys", ".", "exit", "(", "2", ")", "# Wait for the child process to complete.", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "process", ".", "wait", ")", "thread", ".", "start", "(", ")", "if", "argv", ".", "time_limit", "is", "None", ":", "thread", ".", "join", "(", ")", "else", ":", "thread", ".", "join", "(", "argv", ".", "time_limit", ".", "total_seconds", "(", ")", ")", "if", "thread", ".", "is_alive", "(", ")", "and", "argv", ".", "grace_time", ":", "process", ".", "send_signal", "(", "SIGKILL", ")", "thread", ".", "join", "(", "argv", ".", "grace_time", ".", "total_seconds", "(", ")", ")", "if", "thread", ".", "is_alive", "(", ")", ":", "process", ".", "terminate", "(", ")", "thread", ".", "join", "(", ")", "status", "=", "process", ".", "returncode", "assert", "status", "is", "not", "None", "# Forward exit code.", "return", "status" ]
Program entry point.
[ "Program", "entry", "point", "." ]
python
train
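The heart of main above is waiting on the child process in a helper thread and escalating if it outlives its deadline. Below is a standalone sketch of that pattern using the conventional terminate-then-kill order; the command and timeouts are arbitrary, and it assumes a POSIX system with a sleep binary.

import subprocess
import threading

process = subprocess.Popen(["sleep", "10"])
waiter = threading.Thread(target=process.wait)
waiter.start()

waiter.join(2.0)            # overall time limit, in seconds
if waiter.is_alive():
    process.terminate()     # polite stop first...
    waiter.join(1.0)        # ...then a short grace period
if waiter.is_alive():
    process.kill()          # ...and finally force it
    waiter.join()

print(process.returncode)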
saltstack/salt
salt/cloud/clouds/oneandone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/oneandone.py#L408-L426
def avail_images(conn=None, call=None): ''' Return a list of the server appliances that are on the provider ''' if call == 'action': raise SaltCloudSystemExit( 'The avail_images function must be called with ' '-f or --function, or with the --list-images option' ) if not conn: conn = get_conn() ret = {} for appliance in conn.list_appliances(): ret[appliance['name']] = appliance return ret
[ "def", "avail_images", "(", "conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudSystemExit", "(", "'The avail_images function must be called with '", "'-f or --function, or with the --list-images option'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "ret", "=", "{", "}", "for", "appliance", "in", "conn", ".", "list_appliances", "(", ")", ":", "ret", "[", "appliance", "[", "'name'", "]", "]", "=", "appliance", "return", "ret" ]
Return a list of the server appliances that are on the provider
[ "Return", "a", "list", "of", "the", "server", "appliances", "that", "are", "on", "the", "provider" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L880-L896
def get_interface_detail_output_interface_line_protocol_exception_info(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_interface_detail = ET.Element("get_interface_detail") config = get_interface_detail output = ET.SubElement(get_interface_detail, "output") interface = ET.SubElement(output, "interface") interface_type_key = ET.SubElement(interface, "interface-type") interface_type_key.text = kwargs.pop('interface_type') interface_name_key = ET.SubElement(interface, "interface-name") interface_name_key.text = kwargs.pop('interface_name') line_protocol_exception_info = ET.SubElement(interface, "line-protocol-exception-info") line_protocol_exception_info.text = kwargs.pop('line_protocol_exception_info') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_interface_detail_output_interface_line_protocol_exception_info", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_detail", "=", "ET", ".", "Element", "(", "\"get_interface_detail\"", ")", "config", "=", "get_interface_detail", "output", "=", "ET", ".", "SubElement", "(", "get_interface_detail", ",", "\"output\"", ")", "interface", "=", "ET", ".", "SubElement", "(", "output", ",", "\"interface\"", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name_key", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"interface-name\"", ")", "interface_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "line_protocol_exception_info", "=", "ET", ".", "SubElement", "(", "interface", ",", "\"line-protocol-exception-info\"", ")", "line_protocol_exception_info", ".", "text", "=", "kwargs", ".", "pop", "(", "'line_protocol_exception_info'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
nuagenetworks/bambou
bambou/nurest_modelcontroller.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_modelcontroller.py#L87-L99
def get_first_model_with_rest_name(cls, rest_name): """ Get the first model corresponding to a rest_name Args: rest_name: the rest name """ models = cls.get_models_with_rest_name(rest_name) if len(models) > 0: return models[0] return None
[ "def", "get_first_model_with_rest_name", "(", "cls", ",", "rest_name", ")", ":", "models", "=", "cls", ".", "get_models_with_rest_name", "(", "rest_name", ")", "if", "len", "(", "models", ")", ">", "0", ":", "return", "models", "[", "0", "]", "return", "None" ]
Get the first model corresponding to a rest_name Args: rest_name: the rest name
[ "Get", "the", "first", "model", "corresponding", "to", "a", "rest_name" ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L2972-L2980
def pollNextEvent(self, pEvent): """ Returns true and fills the event with the next event on the queue if there is one. If there are no events this method returns false. uncbVREvent should be the size in bytes of the VREvent_t struct """ fn = self.function_table.pollNextEvent result = fn(byref(pEvent), sizeof(VREvent_t)) return result != 0
[ "def", "pollNextEvent", "(", "self", ",", "pEvent", ")", ":", "fn", "=", "self", ".", "function_table", ".", "pollNextEvent", "result", "=", "fn", "(", "byref", "(", "pEvent", ")", ",", "sizeof", "(", "VREvent_t", ")", ")", "return", "result", "!=", "0" ]
Returns true and fills the event with the next event on the queue if there is one. If there are no events this method returns false. uncbVREvent should be the size in bytes of the VREvent_t struct
[ "Returns", "true", "and", "fills", "the", "event", "with", "the", "next", "event", "on", "the", "queue", "if", "there", "is", "one", ".", "If", "there", "are", "no", "events", "this", "method", "returns", "false", ".", "uncbVREvent", "should", "be", "the", "size", "in", "bytes", "of", "the", "VREvent_t", "struct" ]
python
train
benley/butcher
butcher/cache.py
https://github.com/benley/butcher/blob/8b18828ea040af56b7835beab5fd03eab23cc9ee/butcher/cache.py#L128-L146
def get_obj(self, objpath, metahash, dst_path): """Get object from cache, write it to dst_path. Args: objpath: filename relative to buildroot (example: mini-boot/blahblah/somefile.bin) metahash: metahash. See targets/base.py dst_path: Absolute path where the file should be written. Raises: CacheMiss: if the item is not in the cache """ incachepath = self.path_in_cache(objpath, metahash) if not os.path.exists(incachepath): raise CacheMiss('%s not in cache.' % incachepath) else: log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest()) if not os.path.exists(os.path.dirname(dst_path)): os.makedirs(os.path.dirname(dst_path)) os.link(incachepath, dst_path)
[ "def", "get_obj", "(", "self", ",", "objpath", ",", "metahash", ",", "dst_path", ")", ":", "incachepath", "=", "self", ".", "path_in_cache", "(", "objpath", ",", "metahash", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "incachepath", ")", ":", "raise", "CacheMiss", "(", "'%s not in cache.'", "%", "incachepath", ")", "else", ":", "log", ".", "debug", "(", "'Cache hit! %s~%s'", ",", "objpath", ",", "metahash", ".", "hexdigest", "(", ")", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "dst_path", ")", ")", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "dst_path", ")", ")", "os", ".", "link", "(", "incachepath", ",", "dst_path", ")" ]
Get object from cache, write it to dst_path. Args: objpath: filename relative to buildroot (example: mini-boot/blahblah/somefile.bin) metahash: metahash. See targets/base.py dst_path: Absolute path where the file should be written. Raises: CacheMiss: if the item is not in the cache
[ "Get", "object", "from", "cache", "write", "it", "to", "dst_path", "." ]
python
train
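A hedged usage sketch for get_obj above. The cache instance, object path, and destination path are hypothetical; the only requirement visible in the record is that metahash exposes hexdigest(), which any hashlib object does.

import hashlib

# `cache` is assumed to be an already-constructed instance of the cache class
# shown in this record, and CacheMiss the exception it raises; both names are
# taken from the code above and passed in here to keep the sketch self-contained.
def fetch_or_build(cache, CacheMiss):
    metahash = hashlib.sha1(b"example build inputs")   # anything with hexdigest()
    try:
        cache.get_obj("mini-boot/blahblah/somefile.bin", metahash,
                      "/tmp/buildroot/mini-boot/blahblah/somefile.bin")
    except CacheMiss:
        # Not cached yet: build the artifact, then put it in the cache.
        pass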
craffel/mir_eval
mir_eval/io.py
https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L208-L242
def load_labeled_intervals(filename, delimiter=r'\s+'): r"""Import labeled intervals from an annotation file. The file should consist of three columns: Two consisting of numeric values corresponding to start and end time of each interval and a third corresponding to the label of each interval. This is primarily useful for processing events which span a duration, such as segmentation, chords, or instrument activation. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- intervals : np.ndarray, shape=(n_events, 2) array of event start and end time labels : list of str list of labels """ # Use our universal function to load in the events starts, ends, labels = load_delimited(filename, [float, float, str], delimiter) # Stack into an interval matrix intervals = np.array([starts, ends]).T # Validate them, but throw a warning in place of an error try: util.validate_intervals(intervals) except ValueError as error: warnings.warn(error.args[0]) return intervals, labels
[ "def", "load_labeled_intervals", "(", "filename", ",", "delimiter", "=", "r'\\s+'", ")", ":", "# Use our universal function to load in the events", "starts", ",", "ends", ",", "labels", "=", "load_delimited", "(", "filename", ",", "[", "float", ",", "float", ",", "str", "]", ",", "delimiter", ")", "# Stack into an interval matrix", "intervals", "=", "np", ".", "array", "(", "[", "starts", ",", "ends", "]", ")", ".", "T", "# Validate them, but throw a warning in place of an error", "try", ":", "util", ".", "validate_intervals", "(", "intervals", ")", "except", "ValueError", "as", "error", ":", "warnings", ".", "warn", "(", "error", ".", "args", "[", "0", "]", ")", "return", "intervals", ",", "labels" ]
r"""Import labeled intervals from an annotation file. The file should consist of three columns: Two consisting of numeric values corresponding to start and end time of each interval and a third corresponding to the label of each interval. This is primarily useful for processing events which span a duration, such as segmentation, chords, or instrument activation. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- intervals : np.ndarray, shape=(n_events, 2) array of event start and end time labels : list of str list of labels
[ "r", "Import", "labeled", "intervals", "from", "an", "annotation", "file", ".", "The", "file", "should", "consist", "of", "three", "columns", ":", "Two", "consisting", "of", "numeric", "values", "corresponding", "to", "start", "and", "end", "time", "of", "each", "interval", "and", "a", "third", "corresponding", "to", "the", "label", "of", "each", "interval", ".", "This", "is", "primarily", "useful", "for", "processing", "events", "which", "span", "a", "duration", "such", "as", "segmentation", "chords", "or", "instrument", "activation", "." ]
python
train
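A usage sketch for load_labeled_intervals above; the annotation file name and its contents are made up, but the three-column format matches the docstring.

from mir_eval.io import load_labeled_intervals

# Each line of the file holds: start_time  end_time  label, e.g.
#   0.000   11.123   intro
#  11.123   42.500   verse
intervals, labels = load_labeled_intervals("segments.lab")

print(intervals.shape)   # (n_events, 2) array of start/end times in seconds
print(labels[:3])        # the corresponding segment labels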
pudo/jsongraph
jsongraph/query.py
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/query.py#L154-L160
def base_object(self, data): """ Make sure to return all the existing filter fields for query results. """ obj = {'id': data.get(self.id)} if self.parent is not None: obj['$parent'] = data.get(self.parent.id) return obj
[ "def", "base_object", "(", "self", ",", "data", ")", ":", "obj", "=", "{", "'id'", ":", "data", ".", "get", "(", "self", ".", "id", ")", "}", "if", "self", ".", "parent", "is", "not", "None", ":", "obj", "[", "'$parent'", "]", "=", "data", ".", "get", "(", "self", ".", "parent", ".", "id", ")", "return", "obj" ]
Make sure to return all the existing filter fields for query results.
[ "Make", "sure", "to", "return", "all", "the", "existing", "filter", "fields", "for", "query", "results", "." ]
python
train
ClimateImpactLab/DataFS
datafs/core/data_archive.py
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/core/data_archive.py#L299-L321
def _get_default_dependencies(self): ''' Get default dependencies for archive Get default dependencies from requirements file or (if no requirements file) from previous version ''' # Get default dependencies from requirements file default_dependencies = { k: v for k, v in self.api.default_versions.items() if k != self.archive_name} # If no requirements file or is empty: if len(default_dependencies) == 0: # Retrieve dependencies from last archive record history = self.get_history() if len(history) > 0: default_dependencies = history[-1].get('dependencies', {}) return default_dependencies
[ "def", "_get_default_dependencies", "(", "self", ")", ":", "# Get default dependencies from requirements file", "default_dependencies", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "self", ".", "api", ".", "default_versions", ".", "items", "(", ")", "if", "k", "!=", "self", ".", "archive_name", "}", "# If no requirements file or is empty:", "if", "len", "(", "default_dependencies", ")", "==", "0", ":", "# Retrieve dependencies from last archive record", "history", "=", "self", ".", "get_history", "(", ")", "if", "len", "(", "history", ")", ">", "0", ":", "default_dependencies", "=", "history", "[", "-", "1", "]", ".", "get", "(", "'dependencies'", ",", "{", "}", ")", "return", "default_dependencies" ]
Get default dependencies for archive Get default dependencies from requirements file or (if no requirements file) from previous version
[ "Get", "default", "dependencies", "for", "archive" ]
python
train
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L2011-L2025
def injectables(**kwargs): """ Temporarily add injectables to the pipeline environment. Takes only keyword arguments. Injectables will be returned to their original state when the context manager exits. """ global _INJECTABLES original = _INJECTABLES.copy() _INJECTABLES.update(kwargs) yield _INJECTABLES = original
[ "def", "injectables", "(", "*", "*", "kwargs", ")", ":", "global", "_INJECTABLES", "original", "=", "_INJECTABLES", ".", "copy", "(", ")", "_INJECTABLES", ".", "update", "(", "kwargs", ")", "yield", "_INJECTABLES", "=", "original" ]
Temporarily add injectables to the pipeline environment. Takes only keyword arguments. Injectables will be returned to their original state when the context manager exits.
[ "Temporarily", "add", "injectables", "to", "the", "pipeline", "environment", ".", "Takes", "only", "keyword", "arguments", "." ]
python
train
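A usage sketch for the injectables context manager above; the injectable names and values are arbitrary.

import orca

orca.add_injectable("year", 2010)

# Inside the block the override is visible; the original value comes back
# as soon as the context manager exits.
with orca.injectables(year=2020, scenario="baseline"):
    assert orca.get_injectable("year") == 2020

assert orca.get_injectable("year") == 2010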
MonashBI/arcana
arcana/repository/tree.py
https://github.com/MonashBI/arcana/blob/d6271a29d13733d00422d11417af8d200be62acc/arcana/repository/tree.py#L563-L589
def _fill_empty_sessions(self, fill_subjects, fill_visits): """ Fill in tree with additional empty subjects and/or visits to allow the study to pull its inputs from external repositories """ if fill_subjects is None: fill_subjects = [s.id for s in self.subjects] if fill_visits is None: fill_visits = [v.id for v in self.complete_visits] for subject_id in fill_subjects: try: subject = self.subject(subject_id) except ArcanaNameError: subject = self._subjects[subject_id] = Subject( subject_id, [], [], []) for visit_id in fill_visits: try: subject.session(visit_id) except ArcanaNameError: session = Session(subject_id, visit_id, [], []) subject._sessions[visit_id] = session try: visit = self.visit(visit_id) except ArcanaNameError: visit = self._visits[visit_id] = Visit( visit_id, [], [], []) visit._sessions[subject_id] = session
[ "def", "_fill_empty_sessions", "(", "self", ",", "fill_subjects", ",", "fill_visits", ")", ":", "if", "fill_subjects", "is", "None", ":", "fill_subjects", "=", "[", "s", ".", "id", "for", "s", "in", "self", ".", "subjects", "]", "if", "fill_visits", "is", "None", ":", "fill_visits", "=", "[", "v", ".", "id", "for", "v", "in", "self", ".", "complete_visits", "]", "for", "subject_id", "in", "fill_subjects", ":", "try", ":", "subject", "=", "self", ".", "subject", "(", "subject_id", ")", "except", "ArcanaNameError", ":", "subject", "=", "self", ".", "_subjects", "[", "subject_id", "]", "=", "Subject", "(", "subject_id", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "for", "visit_id", "in", "fill_visits", ":", "try", ":", "subject", ".", "session", "(", "visit_id", ")", "except", "ArcanaNameError", ":", "session", "=", "Session", "(", "subject_id", ",", "visit_id", ",", "[", "]", ",", "[", "]", ")", "subject", ".", "_sessions", "[", "visit_id", "]", "=", "session", "try", ":", "visit", "=", "self", ".", "visit", "(", "visit_id", ")", "except", "ArcanaNameError", ":", "visit", "=", "self", ".", "_visits", "[", "visit_id", "]", "=", "Visit", "(", "visit_id", ",", "[", "]", ",", "[", "]", ",", "[", "]", ")", "visit", ".", "_sessions", "[", "subject_id", "]", "=", "session" ]
Fill in tree with additional empty subjects and/or visits to allow the study to pull its inputs from external repositories
[ "Fill", "in", "tree", "with", "additional", "empty", "subjects", "and", "/", "or", "visits", "to", "allow", "the", "study", "to", "pull", "its", "inputs", "from", "external", "repositories" ]
python
train
vicalloy/lbutils
lbutils/utils.py
https://github.com/vicalloy/lbutils/blob/66ae7e73bc939f073cdc1b91602a95e67caf4ba6/lbutils/utils.py#L21-L25
def fmt_num(num, zero_num=None): """ humanize number(9000 to 9,000) """ if zero_num is not None: num = floatformat(num, zero_num) return intcomma(num, False)
[ "def", "fmt_num", "(", "num", ",", "zero_num", "=", "None", ")", ":", "if", "zero_num", "is", "not", "None", ":", "num", "=", "floatformat", "(", "num", ",", "zero_num", ")", "return", "intcomma", "(", "num", ",", "False", ")" ]
humanize number (9000 to 9,000)
[ "humanize", "number", "(", "9000", "to", "9", "000", ")" ]
python
train
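A usage sketch for fmt_num above. It leans on Django's floatformat and intcomma helpers, so configured Django settings are assumed; the import path is inferred from the record's path field.

from lbutils.utils import fmt_num   # import path assumed from lbutils/utils.py

fmt_num(9000)             # '9,000'
fmt_num(1234.5678, 2)     # rounded to two decimals first, then comma-grouped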
jjgomera/iapws
iapws/_iapws.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/_iapws.py#L1183-L1229
def _D2O_ThCond(rho, T): """Equation for the thermal conductivity of heavy water Parameters ---------- rho : float Density, [kg/m³] T : float Temperature, [K] Returns ------- k : float Thermal conductivity, [W/mK] Examples -------- >>> _D2O_ThCond(998, 298.15) 0.6077128675880629 >>> _D2O_ThCond(0, 873.15) 0.07910346589648833 References ---------- IAPWS, Revised Release on Viscosity and Thermal Conductivity of Heavy Water Substance, http://www.iapws.org/relguide/TransD2O-2007.pdf """ rhor = rho/358 Tr = T/643.847 tau = Tr/(abs(Tr-1.1)+1.1) no = [1.0, 37.3223, 22.5485, 13.0465, 0.0, -2.60735] Lo = sum([Li*Tr**i for i, Li in enumerate(no)]) nr = [483.656, -191.039, 73.0358, -7.57467] Lr = -167.31*(1-exp(-2.506*rhor))+sum( [Li*rhor**(i+1) for i, Li in enumerate(nr)]) f1 = exp(0.144847*Tr-5.64493*Tr**2) f2 = exp(-2.8*(rhor-1)**2)-0.080738543*exp(-17.943*(rhor-0.125698)**2) f3 = 1+exp(60*(tau-1)+20) f4 = 1+exp(100*(tau-1)+15) Lc = 35429.6*f1*f2*(1+f2**2*(5e9*f1**4/f3+3.5*f2/f4)) Ll = -741.112*f1**1.2*(1-exp(-(rhor/2.5)**10)) return 0.742128e-3*(Lo+Lr+Lc+Ll)
[ "def", "_D2O_ThCond", "(", "rho", ",", "T", ")", ":", "rhor", "=", "rho", "/", "358", "Tr", "=", "T", "/", "643.847", "tau", "=", "Tr", "/", "(", "abs", "(", "Tr", "-", "1.1", ")", "+", "1.1", ")", "no", "=", "[", "1.0", ",", "37.3223", ",", "22.5485", ",", "13.0465", ",", "0.0", ",", "-", "2.60735", "]", "Lo", "=", "sum", "(", "[", "Li", "*", "Tr", "**", "i", "for", "i", ",", "Li", "in", "enumerate", "(", "no", ")", "]", ")", "nr", "=", "[", "483.656", ",", "-", "191.039", ",", "73.0358", ",", "-", "7.57467", "]", "Lr", "=", "-", "167.31", "*", "(", "1", "-", "exp", "(", "-", "2.506", "*", "rhor", ")", ")", "+", "sum", "(", "[", "Li", "*", "rhor", "**", "(", "i", "+", "1", ")", "for", "i", ",", "Li", "in", "enumerate", "(", "nr", ")", "]", ")", "f1", "=", "exp", "(", "0.144847", "*", "Tr", "-", "5.64493", "*", "Tr", "**", "2", ")", "f2", "=", "exp", "(", "-", "2.8", "*", "(", "rhor", "-", "1", ")", "**", "2", ")", "-", "0.080738543", "*", "exp", "(", "-", "17.943", "*", "(", "rhor", "-", "0.125698", ")", "**", "2", ")", "f3", "=", "1", "+", "exp", "(", "60", "*", "(", "tau", "-", "1", ")", "+", "20", ")", "f4", "=", "1", "+", "exp", "(", "100", "*", "(", "tau", "-", "1", ")", "+", "15", ")", "Lc", "=", "35429.6", "*", "f1", "*", "f2", "*", "(", "1", "+", "f2", "**", "2", "*", "(", "5e9", "*", "f1", "**", "4", "/", "f3", "+", "3.5", "*", "f2", "/", "f4", ")", ")", "Ll", "=", "-", "741.112", "*", "f1", "**", "1.2", "*", "(", "1", "-", "exp", "(", "-", "(", "rhor", "/", "2.5", ")", "**", "10", ")", ")", "return", "0.742128e-3", "*", "(", "Lo", "+", "Lr", "+", "Lc", "+", "Ll", ")" ]
Equation for the thermal conductivity of heavy water Parameters ---------- rho : float Density, [kg/m³] T : float Temperature, [K] Returns ------- k : float Thermal conductivity, [W/mK] Examples -------- >>> _D2O_ThCond(998, 298.15) 0.6077128675880629 >>> _D2O_ThCond(0, 873.15) 0.07910346589648833 References ---------- IAPWS, Revised Release on Viscosity and Thermal Conductivity of Heavy Water Substance, http://www.iapws.org/relguide/TransD2O-2007.pdf
[ "Equation", "for", "the", "thermal", "conductivity", "of", "heavy", "water" ]
python
train
thautwarm/Redy
Redy/Tools/PathLib.py
https://github.com/thautwarm/Redy/blob/8beee5c5f752edfd2754bb1e6b5f4acb016a7770/Redy/Tools/PathLib.py#L208-L233
def relative(self, start: typing.Optional[typing.Union['Path', str]] = None ) -> str: """ :param start: an object of NoneType or Path or str. :return: a string If `start` is None: returns the relative path of current Path object from its own directory. Else: returns the relative path of current Path object from the `start` path. e.g - Dir1 - Dir2 - File1 - File2 - Dir3 - File3 Path(<path of File1>).relative() => "<filename of File1>" Path(<path of Dir2>).relative() => "<directory name of Dir1>" Path(<path of File3>).relative(<path of File1>) => "../Dir2/<filename of File1>" """ if start is None: return os.path.split(str(self))[1] if isinstance(start, Path): start = str(start) return os.path.relpath(str(self), start)
[ "def", "relative", "(", "self", ",", "start", ":", "typing", ".", "Optional", "[", "typing", ".", "Union", "[", "'Path'", ",", "str", "]", "]", "=", "None", ")", "->", "str", ":", "if", "start", "is", "None", ":", "return", "os", ".", "path", ".", "split", "(", "str", "(", "self", ")", ")", "[", "1", "]", "if", "isinstance", "(", "start", ",", "Path", ")", ":", "start", "=", "str", "(", "start", ")", "return", "os", ".", "path", ".", "relpath", "(", "str", "(", "self", ")", ",", "start", ")" ]
:param start: an object of NoneType or Path or str. :return: a string If `start` is None: returns the relative path of current Path object from its own directory. Else: returns the relative path of current Path object from the `start` path. e.g - Dir1 - Dir2 - File1 - File2 - Dir3 - File3 Path(<path of File1>).relative() => "<filename of File1>" Path(<path of Dir2>).relative() => "<directory name of Dir1>" Path(<path of File3>).relative(<path of File1>) => "../Dir2/<filename of File1>"
[ ":", "param", "start", ":", "an", "object", "of", "NoneType", "or", "Path", "or", "str", ".", ":", "return", ":", "a", "string", "If", "start", "is", "None", ":", "returns", "the", "relative", "path", "of", "current", "Path", "object", "from", "its", "own", "directory", ".", "Else", ":", "returns", "the", "relative", "path", "of", "current", "Path", "object", "from", "the", "start", "path", ".", "e", ".", "g", "-", "Dir1", "-", "Dir2", "-", "File1", "-", "File2", "-", "Dir3", "-", "File3", "Path", "(", "<path", "of", "File1", ">", ")", ".", "relative", "()", "=", ">", "<filename", "of", "File1", ">", "Path", "(", "<path", "of", "Dir2", ">", ")", ".", "relative", "()", "=", ">", "<directory", "name", "of", "Dir1", ">", "Path", "(", "<path", "of", "File3", ">", ")", ".", "relative", "(", "<path", "of", "File1", ">", ")", "=", ">", "..", "/", "Dir2", "/", "<filename", "of", "File1", ">" ]
python
train
DataONEorg/d1_python
gmn/src/d1_gmn/app/auth.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/gmn/src/d1_gmn/app/auth.py#L65-L72
def action_to_level(action): """Map action name to action level.""" try: return ACTION_LEVEL_MAP[action] except LookupError: raise d1_common.types.exceptions.InvalidRequest( 0, 'Unknown action. action="{}"'.format(action) )
[ "def", "action_to_level", "(", "action", ")", ":", "try", ":", "return", "ACTION_LEVEL_MAP", "[", "action", "]", "except", "LookupError", ":", "raise", "d1_common", ".", "types", ".", "exceptions", ".", "InvalidRequest", "(", "0", ",", "'Unknown action. action=\"{}\"'", ".", "format", "(", "action", ")", ")" ]
Map action name to action level.
[ "Map", "action", "name", "to", "action", "level", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/transformer.py#L1921-L1931
def transformer_tall_pretrain_lm_tpu_adafactor_large(): """Hparams for transformer on LM pretraining on TPU, large model.""" hparams = transformer_tall_pretrain_lm_tpu_adafactor() hparams.hidden_size = 1024 hparams.num_heads = 16 hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2 hparams.batch_size = 4 hparams.multiproblem_mixing_schedule = "constant" # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad. hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5" return hparams
[ "def", "transformer_tall_pretrain_lm_tpu_adafactor_large", "(", ")", ":", "hparams", "=", "transformer_tall_pretrain_lm_tpu_adafactor", "(", ")", "hparams", ".", "hidden_size", "=", "1024", "hparams", ".", "num_heads", "=", "16", "hparams", ".", "filter_size", "=", "32768", "# max fitting in 16G memory is 49152, batch 2", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "multiproblem_mixing_schedule", "=", "\"constant\"", "# Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.", "hparams", ".", "multiproblem_per_task_threshold", "=", "\"320,80,160,1,80,160,2,20,10,5\"", "return", "hparams" ]
Hparams for transformer on LM pretraining on TPU, large model.
[ "Hparams", "for", "transformer", "on", "LM", "pretraining", "on", "TPU", "large", "model", "." ]
python
train
pydata/xarray
xarray/core/missing.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/missing.py#L259-L272
def ffill(arr, dim=None, limit=None): '''forward fill missing values''' import bottleneck as bn axis = arr.get_axis_num(dim) # work around for bottleneck 178 _limit = limit if limit is not None else arr.shape[axis] return apply_ufunc(bn.push, arr, dask='parallelized', keep_attrs=True, output_dtypes=[arr.dtype], kwargs=dict(n=_limit, axis=axis)).transpose(*arr.dims)
[ "def", "ffill", "(", "arr", ",", "dim", "=", "None", ",", "limit", "=", "None", ")", ":", "import", "bottleneck", "as", "bn", "axis", "=", "arr", ".", "get_axis_num", "(", "dim", ")", "# work around for bottleneck 178", "_limit", "=", "limit", "if", "limit", "is", "not", "None", "else", "arr", ".", "shape", "[", "axis", "]", "return", "apply_ufunc", "(", "bn", ".", "push", ",", "arr", ",", "dask", "=", "'parallelized'", ",", "keep_attrs", "=", "True", ",", "output_dtypes", "=", "[", "arr", ".", "dtype", "]", ",", "kwargs", "=", "dict", "(", "n", "=", "_limit", ",", "axis", "=", "axis", ")", ")", ".", "transpose", "(", "*", "arr", ".", "dims", ")" ]
forward fill missing values
[ "forward", "fill", "missing", "values" ]
python
train
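The ffill helper above is the machinery behind xarray's public DataArray.ffill method; a usage sketch through that public method (bottleneck must be installed):

import numpy as np
import xarray as xr

da = xr.DataArray([0.0, np.nan, np.nan, 3.0, np.nan], dims="time")

print(da.ffill(dim="time").values)            # [0. 0. 0. 3. 3.]
print(da.ffill(dim="time", limit=1).values)   # [ 0.  0. nan  3.  3.]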
tsroten/pynlpir
pynlpir/__init__.py
https://github.com/tsroten/pynlpir/blob/8d5e994796a2b5d513f7db8d76d7d24a85d531b1/pynlpir/__init__.py#L50-L110
def open(data_dir=nlpir.PACKAGE_DIR, encoding=ENCODING, encoding_errors=ENCODING_ERRORS, license_code=None): """Initializes the NLPIR API. This calls the function :func:`~pynlpir.nlpir.Init`. :param str data_dir: The absolute path to the directory that has NLPIR's `Data` directory (defaults to :data:`pynlpir.nlpir.PACKAGE_DIR`). :param str encoding: The encoding that the Chinese source text will be in (defaults to ``'utf_8'``). Possible values include ``'gbk'``, ``'utf_8'``, or ``'big5'``. :param str encoding_errors: The desired encoding error handling scheme. Possible values include ``'strict'``, ``'ignore'``, and ``'replace'``. The default error handler is 'strict' meaning that encoding errors raise :class:`ValueError` (or a more codec specific subclass, such as :class:`UnicodeEncodeError`). :param str license_code: The license code that should be used when initializing NLPIR. This is generally only used by commercial users. :raises RuntimeError: The NLPIR API failed to initialize. Sometimes, NLPIR leaves an error log in the current working directory or NLPIR's ``Data`` directory that provides more detailed messages (but this isn't always the case). :raises LicenseError: The NLPIR license appears to be missing or expired. """ if license_code is None: license_code = '' global ENCODING if encoding.lower() in ('utf_8', 'utf-8', 'u8', 'utf', 'utf8'): ENCODING = 'utf_8' encoding_constant = nlpir.UTF8_CODE elif encoding.lower() in ('gbk', '936', 'cp936', 'ms936'): ENCODING = 'gbk' encoding_constant = nlpir.GBK_CODE elif encoding.lower() in ('big5', 'big5-tw', 'csbig5'): ENCODING = 'big5' encoding_constant = nlpir.BIG5_CODE else: raise ValueError("encoding must be one of 'utf_8', 'big5', or 'gbk'.") logger.debug("Initializing the NLPIR API: 'data_dir': '{}', 'encoding': " "'{}', 'license_code': '{}'".format( data_dir, encoding, license_code)) global ENCODING_ERRORS if encoding_errors not in ('strict', 'ignore', 'replace'): raise ValueError("encoding_errors must be one of 'strict', 'ignore', " "or 'replace'.") else: ENCODING_ERRORS = encoding_errors # Init in Python 3 expects bytes, not strings. if is_python3 and isinstance(data_dir, str): data_dir = _encode(data_dir) if is_python3 and isinstance(license_code, str): license_code = _encode(license_code) if not nlpir.Init(data_dir, encoding_constant, license_code): _attempt_to_raise_license_error(data_dir) raise RuntimeError("NLPIR function 'NLPIR_Init' failed.") else: logger.debug("NLPIR API initialized.")
[ "def", "open", "(", "data_dir", "=", "nlpir", ".", "PACKAGE_DIR", ",", "encoding", "=", "ENCODING", ",", "encoding_errors", "=", "ENCODING_ERRORS", ",", "license_code", "=", "None", ")", ":", "if", "license_code", "is", "None", ":", "license_code", "=", "''", "global", "ENCODING", "if", "encoding", ".", "lower", "(", ")", "in", "(", "'utf_8'", ",", "'utf-8'", ",", "'u8'", ",", "'utf'", ",", "'utf8'", ")", ":", "ENCODING", "=", "'utf_8'", "encoding_constant", "=", "nlpir", ".", "UTF8_CODE", "elif", "encoding", ".", "lower", "(", ")", "in", "(", "'gbk'", ",", "'936'", ",", "'cp936'", ",", "'ms936'", ")", ":", "ENCODING", "=", "'gbk'", "encoding_constant", "=", "nlpir", ".", "GBK_CODE", "elif", "encoding", ".", "lower", "(", ")", "in", "(", "'big5'", ",", "'big5-tw'", ",", "'csbig5'", ")", ":", "ENCODING", "=", "'big5'", "encoding_constant", "=", "nlpir", ".", "BIG5_CODE", "else", ":", "raise", "ValueError", "(", "\"encoding must be one of 'utf_8', 'big5', or 'gbk'.\"", ")", "logger", ".", "debug", "(", "\"Initializing the NLPIR API: 'data_dir': '{}', 'encoding': \"", "\"'{}', 'license_code': '{}'\"", ".", "format", "(", "data_dir", ",", "encoding", ",", "license_code", ")", ")", "global", "ENCODING_ERRORS", "if", "encoding_errors", "not", "in", "(", "'strict'", ",", "'ignore'", ",", "'replace'", ")", ":", "raise", "ValueError", "(", "\"encoding_errors must be one of 'strict', 'ignore', \"", "\"or 'replace'.\"", ")", "else", ":", "ENCODING_ERRORS", "=", "encoding_errors", "# Init in Python 3 expects bytes, not strings.", "if", "is_python3", "and", "isinstance", "(", "data_dir", ",", "str", ")", ":", "data_dir", "=", "_encode", "(", "data_dir", ")", "if", "is_python3", "and", "isinstance", "(", "license_code", ",", "str", ")", ":", "license_code", "=", "_encode", "(", "license_code", ")", "if", "not", "nlpir", ".", "Init", "(", "data_dir", ",", "encoding_constant", ",", "license_code", ")", ":", "_attempt_to_raise_license_error", "(", "data_dir", ")", "raise", "RuntimeError", "(", "\"NLPIR function 'NLPIR_Init' failed.\"", ")", "else", ":", "logger", ".", "debug", "(", "\"NLPIR API initialized.\"", ")" ]
Initializes the NLPIR API. This calls the function :func:`~pynlpir.nlpir.Init`. :param str data_dir: The absolute path to the directory that has NLPIR's `Data` directory (defaults to :data:`pynlpir.nlpir.PACKAGE_DIR`). :param str encoding: The encoding that the Chinese source text will be in (defaults to ``'utf_8'``). Possible values include ``'gbk'``, ``'utf_8'``, or ``'big5'``. :param str encoding_errors: The desired encoding error handling scheme. Possible values include ``'strict'``, ``'ignore'``, and ``'replace'``. The default error handler is 'strict' meaning that encoding errors raise :class:`ValueError` (or a more codec specific subclass, such as :class:`UnicodeEncodeError`). :param str license_code: The license code that should be used when initializing NLPIR. This is generally only used by commercial users. :raises RuntimeError: The NLPIR API failed to initialize. Sometimes, NLPIR leaves an error log in the current working directory or NLPIR's ``Data`` directory that provides more detailed messages (but this isn't always the case). :raises LicenseError: The NLPIR license appears to be missing or expired.
[ "Initializes", "the", "NLPIR", "API", "." ]
python
train
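A usage sketch for pynlpir.open above; it assumes the bundled license and data files are in place, and the sample sentence is arbitrary.

import pynlpir

pynlpir.open()                     # defaults: packaged Data dir, UTF-8 input
# pynlpir.open(encoding="gbk")     # or initialize for GBK-encoded text instead

print(pynlpir.segment("这是一个测试句子"))   # list of (word, part-of-speech) pairs
pynlpir.close()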
pystorm/pystorm
pystorm/serializers/json_serializer.py
https://github.com/pystorm/pystorm/blob/0f853e007c79e03cefdb4a0794423f84dce4c2f3/pystorm/serializers/json_serializer.py#L39-L91
def read_message(self): """The Storm multilang protocol consists of JSON messages followed by a newline and "end\n". All of Storm's messages (for either bolts or spouts) should be of the form:: '<command or task_id form prior emit>\\nend\\n' Command example, an incoming Tuple to a bolt:: '{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n' Command example for a spout to emit its next Tuple:: '{"command": "next"}\\nend\\n' Example, the task IDs a prior emit was sent to:: '[12, 22, 24]\\nend\\n' The edge case of where we read ``''`` from ``input_stream`` indicating EOF, usually means that communication with the supervisor has been severed. """ msg = "" num_blank_lines = 0 while True: # readline will return trailing \n so that output is unambigious, we # should only have line == '' if we're at EOF with self._reader_lock: line = self.input_stream.readline() if line == "end\n": break elif line == "": raise StormWentAwayError() elif line == "\n": num_blank_lines += 1 if num_blank_lines % 1000 == 0: log.warn( "While trying to read a command or pending task " "ID, Storm has instead sent %s '\\n' messages.", num_blank_lines, ) continue msg = "{}{}\n".format(msg, line[0:-1]) try: return json.loads(msg) except Exception: log.error("JSON decode error for message: %r", msg, exc_info=True) raise
[ "def", "read_message", "(", "self", ")", ":", "msg", "=", "\"\"", "num_blank_lines", "=", "0", "while", "True", ":", "# readline will return trailing \\n so that output is unambigious, we", "# should only have line == '' if we're at EOF", "with", "self", ".", "_reader_lock", ":", "line", "=", "self", ".", "input_stream", ".", "readline", "(", ")", "if", "line", "==", "\"end\\n\"", ":", "break", "elif", "line", "==", "\"\"", ":", "raise", "StormWentAwayError", "(", ")", "elif", "line", "==", "\"\\n\"", ":", "num_blank_lines", "+=", "1", "if", "num_blank_lines", "%", "1000", "==", "0", ":", "log", ".", "warn", "(", "\"While trying to read a command or pending task \"", "\"ID, Storm has instead sent %s '\\\\n' messages.\"", ",", "num_blank_lines", ",", ")", "continue", "msg", "=", "\"{}{}\\n\"", ".", "format", "(", "msg", ",", "line", "[", "0", ":", "-", "1", "]", ")", "try", ":", "return", "json", ".", "loads", "(", "msg", ")", "except", "Exception", ":", "log", ".", "error", "(", "\"JSON decode error for message: %r\"", ",", "msg", ",", "exc_info", "=", "True", ")", "raise" ]
The Storm multilang protocol consists of JSON messages followed by
a newline and "end\n".

All of Storm's messages (for either bolts or spouts) should be of the
form::

    '<command or task_id from prior emit>\\nend\\n'

Command example, an incoming Tuple to a bolt::

    '{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n'

Command example for a spout to emit its next Tuple::

    '{"command": "next"}\\nend\\n'

Example, the task IDs a prior emit was sent to::

    '[12, 22, 24]\\nend\\n'

The edge case where we read ``''`` from ``input_stream``, indicating
EOF, usually means that communication with the supervisor has been
severed.
[ "The", "Storm", "multilang", "protocol", "consists", "of", "JSON", "messages", "followed", "by", "a", "newline", "and", "end", "\\", "n", "." ]
python
train
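The docstring above describes the multilang framing: a JSON payload, a newline, then an end line. A standalone sketch of decoding one such message, using an in-memory stream in place of the serializer's input_stream:

import io
import json

stream = io.StringIO('{"command": "next"}\nend\n')

lines = []
while True:
    line = stream.readline()
    if line == "end\n":      # one complete multilang message has been read
        break
    lines.append(line)

msg = json.loads("".join(lines))
print(msg)                   # {'command': 'next'}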
apache/incubator-mxnet
python/mxnet/model.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/model.py#L58-L80
def _create_sparse_kvstore(kvstore): """Create kvstore assuming some parameters' storage types are row_sparse. Parameters ---------- kvstore : KVStore or str The kvstore. Returns ------- kvstore : KVStore update_on_kvstore : bool. Always True. """ # always update on kvstore update_on_kvstore = True if isinstance(kvstore, kvs.KVStore): kv = kvstore elif isinstance(kvstore, str): kv = kvs.create(kvstore) else: raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. " "The type must be KVStore or str." % kvstore) return (kv, update_on_kvstore)
[ "def", "_create_sparse_kvstore", "(", "kvstore", ")", ":", "# always update on kvstore", "update_on_kvstore", "=", "True", "if", "isinstance", "(", "kvstore", ",", "kvs", ".", "KVStore", ")", ":", "kv", "=", "kvstore", "elif", "isinstance", "(", "kvstore", ",", "str", ")", ":", "kv", "=", "kvs", ".", "create", "(", "kvstore", ")", "else", ":", "raise", "TypeError", "(", "\"Cannot create '%s' KVStore with row_sparse parameters. \"", "\"The type must be KVStore or str.\"", "%", "kvstore", ")", "return", "(", "kv", ",", "update_on_kvstore", ")" ]
Create kvstore assuming some parameters' storage types are row_sparse. Parameters ---------- kvstore : KVStore or str The kvstore. Returns ------- kvstore : KVStore update_on_kvstore : bool. Always True.
[ "Create", "kvstore", "assuming", "some", "parameters", "storage", "types", "are", "row_sparse", "." ]
python
train
ANTsX/ANTsPy
ants/core/ants_image.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/core/ants_image.py#L129-L138
def origin(self): """ Get image origin Returns ------- tuple """ libfn = utils.get_lib_fn('getOrigin%s'%self._libsuffix) return libfn(self.pointer)
[ "def", "origin", "(", "self", ")", ":", "libfn", "=", "utils", ".", "get_lib_fn", "(", "'getOrigin%s'", "%", "self", ".", "_libsuffix", ")", "return", "libfn", "(", "self", ".", "pointer", ")" ]
Get image origin Returns ------- tuple
[ "Get", "image", "origin" ]
python
train
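A usage sketch for the origin accessor above, assuming it is exposed as a property on the image class, as it is in released ANTsPy versions; the sample image is one that ships with the package.

import ants

img = ants.image_read(ants.get_ants_data("r16"))   # small bundled 2-D sample
print(img.origin)    # e.g. (0.0, 0.0) for this image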
rocky/python-xdis
xdis/dropbox/decrypt25.py
https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/dropbox/decrypt25.py#L168-L175
def loads(s): """ xdis.marshal.load() but with its dispatch load_code() function replaced with our decoding version. """ um = xmarshal._FastUnmarshaller(s) um.dispatch[xmarshal.TYPE_CODE] = load_code return um.load()
[ "def", "loads", "(", "s", ")", ":", "um", "=", "xmarshal", ".", "_FastUnmarshaller", "(", "s", ")", "um", ".", "dispatch", "[", "xmarshal", ".", "TYPE_CODE", "]", "=", "load_code", "return", "um", ".", "load", "(", ")" ]
xdis.marshal.load() but with its dispatch load_code() function replaced with our decoding version.
[ "xdis", ".", "marshal", ".", "load", "()", "but", "with", "its", "dispatch", "load_code", "()", "function", "replaced", "with", "our", "decoding", "version", "." ]
python
train
rgs1/zk_shell
zk_shell/acl.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/acl.py#L85-L91
def from_dict(cls, acl_dict): """ ACL from dict """ perms = acl_dict.get("perms", Permissions.ALL) id_dict = acl_dict.get("id", {}) id_scheme = id_dict.get("scheme", "world") id_id = id_dict.get("id", "anyone") return ACL(perms, Id(id_scheme, id_id))
[ "def", "from_dict", "(", "cls", ",", "acl_dict", ")", ":", "perms", "=", "acl_dict", ".", "get", "(", "\"perms\"", ",", "Permissions", ".", "ALL", ")", "id_dict", "=", "acl_dict", ".", "get", "(", "\"id\"", ",", "{", "}", ")", "id_scheme", "=", "id_dict", ".", "get", "(", "\"scheme\"", ",", "\"world\"", ")", "id_id", "=", "id_dict", ".", "get", "(", "\"id\"", ",", "\"anyone\"", ")", "return", "ACL", "(", "perms", ",", "Id", "(", "id_scheme", ",", "id_id", ")", ")" ]
ACL from dict
[ "ACL", "from", "dict" ]
python
train
pypa/pipenv
pipenv/vendor/cerberus/validator.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/cerberus/validator.py#L341-L361
def _drop_nodes_from_errorpaths(self, _errors, dp_items, sp_items): """ Removes nodes by index from an errorpath, relatively to the basepaths of self. :param errors: A list of :class:`errors.ValidationError` instances. :param dp_items: A list of integers, pointing at the nodes to drop from the :attr:`document_path`. :param sp_items: Alike ``dp_items``, but for :attr:`schema_path`. """ dp_basedepth = len(self.document_path) sp_basedepth = len(self.schema_path) for error in _errors: for i in sorted(dp_items, reverse=True): error.document_path = \ drop_item_from_tuple(error.document_path, dp_basedepth + i) for i in sorted(sp_items, reverse=True): error.schema_path = \ drop_item_from_tuple(error.schema_path, sp_basedepth + i) if error.child_errors: self._drop_nodes_from_errorpaths(error.child_errors, dp_items, sp_items)
[ "def", "_drop_nodes_from_errorpaths", "(", "self", ",", "_errors", ",", "dp_items", ",", "sp_items", ")", ":", "dp_basedepth", "=", "len", "(", "self", ".", "document_path", ")", "sp_basedepth", "=", "len", "(", "self", ".", "schema_path", ")", "for", "error", "in", "_errors", ":", "for", "i", "in", "sorted", "(", "dp_items", ",", "reverse", "=", "True", ")", ":", "error", ".", "document_path", "=", "drop_item_from_tuple", "(", "error", ".", "document_path", ",", "dp_basedepth", "+", "i", ")", "for", "i", "in", "sorted", "(", "sp_items", ",", "reverse", "=", "True", ")", ":", "error", ".", "schema_path", "=", "drop_item_from_tuple", "(", "error", ".", "schema_path", ",", "sp_basedepth", "+", "i", ")", "if", "error", ".", "child_errors", ":", "self", ".", "_drop_nodes_from_errorpaths", "(", "error", ".", "child_errors", ",", "dp_items", ",", "sp_items", ")" ]
Removes nodes by index from an errorpath, relatively to the basepaths of self.

:param errors: A list of :class:`errors.ValidationError` instances.
:param dp_items: A list of integers, pointing at the nodes to drop from
    the :attr:`document_path`.
:param sp_items: Alike ``dp_items``, but for :attr:`schema_path`.
[ "Removes", "nodes", "by", "index", "from", "an", "errorpath", "relatively", "to", "the", "basepaths", "of", "self", "." ]
python
train
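The helper drop_item_from_tuple used above is not included in the record; a minimal sketch of what it presumably does, inferred from the call sites rather than copied from cerberus:

def drop_item_from_tuple(tup, index):
    # Assumed behavior: return a copy of `tup` without the element at `index`.
    return tup[:index] + tup[index + 1:]

# With a base depth of 1 and dp_items == [0], an error path like
# ('field', 0, 'inner') would be pruned to ('field', 'inner').
print(drop_item_from_tuple(('field', 0, 'inner'), 1))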
mkouhei/bootstrap-py
bootstrap_py/commands.py
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L54-L69
def valid_url(url):
    """Validate url.

    :rtype: str
    :return: url
    :param str url: package homepage url.
    """
    regex = re.compile(
        r'^(?:http)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
        r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?))'
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if not regex.match(url):
        raise argparse.ArgumentTypeError('"{0}" is invalid url.'.format(url))
    return url
[ "def", "valid_url", "(", "url", ")", ":", "regex", "=", "re", ".", "compile", "(", "r'^(?:http)s?://'", "r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+'", "r'(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?))'", "r'(?:/?|[/?]\\S+)$'", ",", "re", ".", "IGNORECASE", ")", "if", "not", "regex", ".", "match", "(", "url", ")", ":", "raise", "argparse", ".", "ArgumentTypeError", "(", "'\"{0}\" is invalid url.'", ".", "format", "(", "url", ")", ")", "return", "url" ]
Validate url.

:rtype: str
:return: url
:param str url: package homepage url.
[ "Validate", "url", "." ]
python
train
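Because valid_url raises argparse.ArgumentTypeError, it is evidently meant to serve as an argparse type; a small wiring sketch (the flag name and test URL are illustrative):

import argparse
from bootstrap_py.commands import valid_url   # module path from the record

parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', type=valid_url, help='package homepage url')

args = parser.parse_args(['--url', 'https://example.org/project'])   # accepted
# parser.parse_args(['--url', 'not-a-url'])  # would exit with '"not-a-url" is invalid url.'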
raamana/mrivis
mrivis/base.py
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/base.py#L1110-L1126
def _apply_mask(self, roi_mask):
        """Removes voxels outside the given mask or ROI set."""

        # TODO ensure compatible with input image
        #   - must have < N dim and same size in moving dims.
        rows_to_delete = list()  # to allow for additional masks to be applied in the future

        if isinstance(roi_mask, np.ndarray):  # not (roi_mask is None or roi_mask=='auto'):
            self._set_roi_mask(roi_mask)

            rows_roi = np.where(self.roi_mask.flatten() == cfg.background_value)
            # TODO below would cause differences in size/shape across mask and carpet!
            self.carpet = np.delete(self.carpet, rows_roi, axis=0)
        else:
            self.roi_mask = np.ones(self.carpet.shape)
[ "def", "_apply_mask", "(", "self", ",", "roi_mask", ")", ":", "# TODO ensure compatible with input image", "# - must have < N dim and same size in moving dims.", "rows_to_delete", "=", "list", "(", ")", "# to allow for additional masks to be applied in the future", "if", "isinstance", "(", "roi_mask", ",", "np", ".", "ndarray", ")", ":", "# not (roi_mask is None or roi_mask=='auto'):", "self", ".", "_set_roi_mask", "(", "roi_mask", ")", "rows_roi", "=", "np", ".", "where", "(", "self", ".", "roi_mask", ".", "flatten", "(", ")", "==", "cfg", ".", "background_value", ")", "# TODO below would cause differences in size/shape across mask and carpet!", "self", ".", "carpet", "=", "np", ".", "delete", "(", "self", ".", "carpet", ",", "rows_roi", ",", "axis", "=", "0", ")", "else", ":", "self", ".", "roi_mask", "=", "np", ".", "ones", "(", "self", ".", "carpet", ".", "shape", ")" ]
Removes voxels outside the given mask or ROI set.
[ "Removes", "voxels", "outside", "the", "given", "mask", "or", "ROI", "set", "." ]
python
train
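The row-dropping step in _apply_mask can be illustrated standalone with NumPy; the carpet shape, mask and background value below are toy data, not taken from the record:

import numpy as np

background_value = 0                      # stand-in for cfg.background_value
carpet = np.arange(12.0).reshape(4, 3)    # 4 voxels x 3 time points
roi_mask = np.array([1, 0, 1, 0])         # flattened ROI mask over the 4 voxels

rows_outside_roi = np.where(roi_mask.flatten() == background_value)
carpet = np.delete(carpet, rows_outside_roi, axis=0)
print(carpet.shape)   # (2, 3): only in-mask voxels remain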
wummel/linkchecker
linkcheck/director/console.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/director/console.py#L143-L155
def print_app_info (out=stderr):
    """Print system and application info (output defaults to stderr)."""
    print(_("System info:"), file=out)
    print(configuration.App, file=out)
    print(_("Released on:"), configuration.ReleaseDate, file=out)
    print(_("Python %(version)s on %(platform)s") %
          {"version": sys.version, "platform": sys.platform}, file=out)
    for key in PYTHON_ENV_VARS:
        print_env_info(key, out=out)
    print(configuration.get_modules_info(), file=out)
    stime = strformat.strtime(time.time())
    print(_("Local time:"), stime, file=out)
    print(_("sys.argv:"), sys.argv, file=out)
[ "def", "print_app_info", "(", "out", "=", "stderr", ")", ":", "print", "(", "_", "(", "\"System info:\"", ")", ",", "file", "=", "out", ")", "print", "(", "configuration", ".", "App", ",", "file", "=", "out", ")", "print", "(", "_", "(", "\"Released on:\"", ")", ",", "configuration", ".", "ReleaseDate", ",", "file", "=", "out", ")", "print", "(", "_", "(", "\"Python %(version)s on %(platform)s\"", ")", "%", "{", "\"version\"", ":", "sys", ".", "version", ",", "\"platform\"", ":", "sys", ".", "platform", "}", ",", "file", "=", "out", ")", "for", "key", "in", "PYTHON_ENV_VARS", ":", "print_env_info", "(", "key", ",", "out", "=", "out", ")", "print", "(", "configuration", ".", "get_modules_info", "(", ")", ",", "file", "=", "out", ")", "stime", "=", "strformat", ".", "strtime", "(", "time", ".", "time", "(", ")", ")", "print", "(", "_", "(", "\"Local time:\"", ")", ",", "stime", ",", "file", "=", "out", ")", "print", "(", "_", "(", "\"sys.argv:\"", ")", ",", "sys", ".", "argv", ",", "file", "=", "out", ")" ]
Print system and application info (output defaults to stderr).
[ "Print", "system", "and", "application", "info", "(", "output", "defaults", "to", "stderr", ")", "." ]
python
train
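A call sketch for print_app_info, assuming the import path implied by the record's path field and redirecting the report to stdout:

import sys
from linkcheck.director.console import print_app_info   # module path from the record

print_app_info(out=sys.stdout)   # dumps version, platform, env vars, modules and sys.argv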
tanghaibao/jcvi
jcvi/apps/base.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/base.py#L1228-L1257
def pushover(message, token, user, title="JCVI: Job Monitor", \
             priority=0, timestamp=None):
    """
    pushover.net python API

    <https://pushover.net/faq#library-python>
    """
    assert -1 <= priority <= 2, \
            "Priority should be an int() between -1 and 2"

    if timestamp == None:
        from time import time
        timestamp = int(time())

    retry, expire = (300, 3600) if priority == 2 \
            else (None, None)

    conn = HTTPSConnection("api.pushover.net:443")
    conn.request("POST", "/1/messages.json",
        urlencode({
            "token": token,
            "user": user,
            "message": message,
            "title": title,
            "priority": priority,
            "timestamp": timestamp,
            "retry": retry,
            "expire": expire,
        }), {"Content-type": "application/x-www-form-urlencoded"})
    conn.getresponse()
[ "def", "pushover", "(", "message", ",", "token", ",", "user", ",", "title", "=", "\"JCVI: Job Monitor\"", ",", "priority", "=", "0", ",", "timestamp", "=", "None", ")", ":", "assert", "-", "1", "<=", "priority", "<=", "2", ",", "\"Priority should be an int() between -1 and 2\"", "if", "timestamp", "==", "None", ":", "from", "time", "import", "time", "timestamp", "=", "int", "(", "time", "(", ")", ")", "retry", ",", "expire", "=", "(", "300", ",", "3600", ")", "if", "priority", "==", "2", "else", "(", "None", ",", "None", ")", "conn", "=", "HTTPSConnection", "(", "\"api.pushover.net:443\"", ")", "conn", ".", "request", "(", "\"POST\"", ",", "\"/1/messages.json\"", ",", "urlencode", "(", "{", "\"token\"", ":", "token", ",", "\"user\"", ":", "user", ",", "\"message\"", ":", "message", ",", "\"title\"", ":", "title", ",", "\"priority\"", ":", "priority", ",", "\"timestamp\"", ":", "timestamp", ",", "\"retry\"", ":", "retry", ",", "\"expire\"", ":", "expire", ",", "}", ")", ",", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", "}", ")", "conn", ".", "getresponse", "(", ")" ]
pushover.net python API <https://pushover.net/faq#library-python>
[ "pushover", ".", "net", "python", "API" ]
python
train
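A call sketch for pushover; the application token and user key below are made-up placeholders, and priority=2 triggers the hard-coded retry/expire pair (300 s / 3600 s):

from jcvi.apps.base import pushover   # module path from the record

APP_TOKEN = "azGDORePK8gMaC0QOYAMyEEuzJni"   # placeholder Pushover application token
USER_KEY = "uQiRzpo4DXghDmr9QzzfQu27cmVR"    # placeholder Pushover user key

pushover("BLAST job finished on node12", APP_TOKEN, USER_KEY,
         title="JCVI: Job Monitor", priority=2)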