Dataset schema (column : type, observed range):

    id_within_dataset : int64     (values 46 to 2.71M)
    snippet           : string    (lengths 63 to 481k)
    tokens            : sequence  (lengths 20 to 15.6k)
    language          : string    (2 values)
    nl                : string    (lengths 1 to 32.4k)
    is_duplicated     : bool      (2 classes)
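Each record below lists, in order: id_within_dataset, snippet, tokens (the snippet's code tokens, with the docstring stripped out and surfaced separately), language, nl (the docstring text), and is_duplicated. As a minimal sketch of loading and filtering a corpus with this schema via the Hugging Face `datasets` library — the dataset path "user/code-docstring-corpus" is a hypothetical placeholder, not this dataset's actual identifier:

# A minimal sketch, assuming the corpus is hosted on the Hugging Face Hub.
# "user/code-docstring-corpus" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("user/code-docstring-corpus", split="train")

# Keep non-duplicated records with reasonably short docstrings,
# mirroring the is_duplicated and nl columns described above.
subset = ds.filter(lambda ex: not ex["is_duplicated"] and len(ex["nl"]) < 200)
print(subset[0]["language"], subset[0]["snippet"][:80])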
2,156,404
def __getattr__(cls: Callable, name: str) -> Callable:
    """
    This gives us an alternate way to make a request:

    >>> Request.cat()
    {'jsonrpc': '2.0', 'method': 'cat', 'id': 1}

    That's the same as saying `Request("cat")`.
    """
    def attr_handler(*args: Any, **kwargs: Any) -> "Request":
        return cls(name, *args, **kwargs)
    return attr_handler
[ "def", "__getattr__", "(", "cls", ":", "Callable", ",", "name", ":", "str", ")", "->", "Callable", ":", "def", "attr_handler", "(", "*", "args", ":", "Any", ",", "**", "kwargs", ":", "Any", ")", "->", "\"Request\"", ":", "return", "cls", "(", "name", ",", "*", "args", ",", "**", "kwargs", ")", "return", "attr_handler" ]
python
This gives us an alternate way to make a request: >>> Request.cat() {'jsonrpc': '2.0', 'method': 'cat', 'id': 1} That's the same as saying `Request("cat")`.
false
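The __getattr__ above is written for a metaclass, so attribute access on the Request class itself manufactures request objects. A runnable sketch of the same pattern under assumed names (the surrounding class definitions are not part of this record, so everything besides __getattr__ is illustrative):

from typing import Any, Callable


class _RequestMeta(type):
    # Unknown attribute lookups on the *class* return a factory that
    # forwards the attribute name as the JSON-RPC method name.
    def __getattr__(cls, name: str) -> Callable:
        def attr_handler(*args: Any, **kwargs: Any) -> "Request":
            return cls(name, *args, **kwargs)
        return attr_handler


class Request(dict, metaclass=_RequestMeta):
    _id = 0

    def __init__(self, method: str, *args: Any, **kwargs: Any) -> None:
        Request._id += 1
        super().__init__(jsonrpc="2.0", method=method, id=Request._id)


print(Request.cat())    # {'jsonrpc': '2.0', 'method': 'cat', 'id': 1}
print(Request("dog"))   # equivalent spelling: Request.dog()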
2,692,711
def __deepcopy__(self, memo):
    """
    Copies a Cell but does not copy its domain or values (extension)
    because these can be recomputed.

    TODO: test that amortized flags (__recompute=False) are not copied
    """
    copied = copy.copy(self)  # shallow copy
    copied.__dict__.update(self.__dict__)
    for key, val in copied.__dict__.items():
        if not key in ['domain', 'values', '_domain_hash']:
            copied.__dict__[key] = copy.deepcopy(val, memo)
    return copied
[ "def", "__deepcopy__", "(", "self", ",", "memo", ")", ":", "copied", "=", "copy", ".", "copy", "(", "self", ")", "copied", ".", "__dict__", ".", "update", "(", "self", ".", "__dict__", ")", "for", "key", ",", "val", "in", "copied", ".", "__dict__", ".", "items", "(", ")", ":", "if", "not", "key", "in", "[", "'domain'", ",", "'values'", ",", "'_domain_hash'", "]", ":", "copied", ".", "__dict__", "[", "key", "]", "=", "copy", ".", "deepcopy", "(", "val", ",", "memo", ")", "return", "copied" ]
python
Copies a Cell but does not copy its domain or values (extension) because these can be recomputed. TODO: test that amortized flags (__recompute=False) are not copied
false
2,212,788
def process_edge_dijkstra(self, current, neighbor, pred, q, component):
    '''
    API: process_edge_dijkstra(self, current, neighbor, pred, q, component)
    Description:
        Used by search() method if the algo argument is 'Dijkstra'.
        Processes edges along Dijkstra's algorithm. User does not need to
        call this method directly.
    Input:
        current: Name of the current node.
        neighbor: Name of the neighbor node.
        pred: Predecessor tree.
        q: Data structure that holds nodes to be processed in a queue.
        component: component number.
    Post:
        'color' attribute of nodes and edges may change.
    '''
    if current is None:
        self.get_node(neighbor).set_attr('color', 'red')
        self.get_node(neighbor).set_attr('label', 0)
        q.push(neighbor, 0)
        self.display()
        self.get_node(neighbor).set_attr('color', 'black')
        return
    new_estimate = (q.get_priority(current) +
                    self.get_edge_attr(current, neighbor, 'cost'))
    if neighbor not in pred or new_estimate < q.get_priority(neighbor):
        pred[neighbor] = current
        self.get_node(neighbor).set_attr('color', 'red')
        self.get_node(neighbor).set_attr('label', new_estimate)
        q.push(neighbor, new_estimate)
        self.display()
        self.get_node(neighbor).set_attr('color', 'black')
[ "def", "process_edge_dijkstra", "(", "self", ",", "current", ",", "neighbor", ",", "pred", ",", "q", ",", "component", ")", ":", "if", "current", "is", "None", ":", "self", ".", "get_node", "(", "neighbor", ")", ".", "set_attr", "(", "'color'", ",", "'red'", ")", "self", ".", "get_node", "(", "neighbor", ")", ".", "set_attr", "(", "'label'", ",", "0", ")", "q", ".", "push", "(", "neighbor", ",", "0", ")", "self", ".", "display", "(", ")", "self", ".", "get_node", "(", "neighbor", ")", ".", "set_attr", "(", "'color'", ",", "'black'", ")", "return", "new_estimate", "=", "(", "q", ".", "get_priority", "(", "current", ")", "+", "self", ".", "get_edge_attr", "(", "current", ",", "neighbor", ",", "'cost'", ")", ")", "if", "neighbor", "not", "in", "pred", "or", "new_estimate", "<", "q", ".", "get_priority", "(", "neighbor", ")", ":", "pred", "[", "neighbor", "]", "=", "current", "self", ".", "get_node", "(", "neighbor", ")", ".", "set_attr", "(", "'color'", ",", "'red'", ")", "self", ".", "get_node", "(", "neighbor", ")", ".", "set_attr", "(", "'label'", ",", "new_estimate", ")", "q", ".", "push", "(", "neighbor", ",", "new_estimate", ")", "self", ".", "display", "(", ")", "self", ".", "get_node", "(", "neighbor", ")", ".", "set_attr", "(", "'color'", ",", "'black'", ")" ]
python
API: process_edge_dijkstra(self, current, neighbor, pred, q, component) Description: Used by search() method if the algo argument is 'Dijkstra'. Processes edges along Dijkstra's algorithm. User does not need to call this method directly. Input: current: Name of the current node. neighbor: Name of the neighbor node. pred: Predecessor tree. q: Data structure that holds nodes to be processed in a queue. component: component number. Post: 'color' attribute of nodes and edges may change.
false
2,498,107
def parse_relationship(document, xmlcontent, rel_type):
    """Parse relationship document.

    Relationships hold information like external or internal references
    for links.

    Relationships are placed in file '_rels/document.xml.rels'.
    """
    doc = etree.fromstring(xmlcontent)
    for elem in doc:
        if elem.tag == _name('{{{pr}}}Relationship'):
            rel = {'target': elem.attrib['Target'],
                   'type': elem.attrib['Type'],
                   'target_mode': elem.attrib.get('TargetMode', 'Internal')}
            document.relationships[rel_type][elem.attrib['Id']] = rel
[ "def", "parse_relationship", "(", "document", ",", "xmlcontent", ",", "rel_type", ")", ":", "doc", "=", "etree", ".", "fromstring", "(", "xmlcontent", ")", "for", "elem", "in", "doc", ":", "if", "elem", ".", "tag", "==", "_name", "(", "'{{{pr}}}Relationship'", ")", ":", "rel", "=", "{", "'target'", ":", "elem", ".", "attrib", "[", "'Target'", "]", ",", "'type'", ":", "elem", ".", "attrib", "[", "'Type'", "]", ",", "'target_mode'", ":", "elem", ".", "attrib", ".", "get", "(", "'TargetMode'", ",", "'Internal'", ")", "}", "document", ".", "relationships", "[", "rel_type", "]", "[", "elem", ".", "attrib", "[", "'Id'", "]", "]", "=", "rel" ]
python
Parse relationship document. Relationships hold information like external or internal references for links. Relationships are placed in file '_rels/document.xml.rels'.
false
2,657,810
def _tiles_from_bbox(bbox, zoom_level):
    """
    * Returns all tiles for the specified bounding box
    """
    if isinstance(bbox, dict):
        point_min = Point.from_latitude_longitude(latitude=bbox['tl'], longitude=bbox['tr'])
        point_max = Point.from_latitude_longitude(latitude=bbox['bl'], longitude=bbox['br'])
    elif isinstance(bbox, list):
        point_min = Point.from_latitude_longitude(latitude=bbox[1], longitude=bbox[0])
        point_max = Point.from_latitude_longitude(latitude=bbox[3], longitude=bbox[2])
    else:
        raise RuntimeError("bbox must be either a dict or a list")
    tile_min = Tile.for_point(point_min, zoom_level)
    tile_max = Tile.for_point(point_max, zoom_level)
    tiles = []
    for x in range(tile_min.tms_x, tile_max.tms_x + 1):
        for y in range(tile_min.tms_y, tile_max.tms_y + 1):
            tiles.append(Tile.from_tms(tms_x=x, tms_y=y, zoom=zoom_level))
    return tiles
[ "def", "_tiles_from_bbox", "(", "bbox", ",", "zoom_level", ")", ":", "if", "isinstance", "(", "bbox", ",", "dict", ")", ":", "point_min", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "'tl'", "]", ",", "longitude", "=", "bbox", "[", "'tr'", "]", ")", "point_max", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "'bl'", "]", ",", "longitude", "=", "bbox", "[", "'br'", "]", ")", "elif", "isinstance", "(", "bbox", ",", "list", ")", ":", "point_min", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "1", "]", ",", "longitude", "=", "bbox", "[", "0", "]", ")", "point_max", "=", "Point", ".", "from_latitude_longitude", "(", "latitude", "=", "bbox", "[", "3", "]", ",", "longitude", "=", "bbox", "[", "2", "]", ")", "else", ":", "raise", "RuntimeError", "(", "\"bbox must bei either a dict or a list\"", ")", "tile_min", "=", "Tile", ".", "for_point", "(", "point_min", ",", "zoom_level", ")", "tile_max", "=", "Tile", ".", "for_point", "(", "point_max", ",", "zoom_level", ")", "tiles", "=", "[", "]", "for", "x", "in", "range", "(", "tile_min", ".", "tms_x", ",", "tile_max", ".", "tms_x", "+", "1", ")", ":", "for", "y", "in", "range", "(", "tile_min", ".", "tms_y", ",", "tile_max", ".", "tms_y", "+", "1", ")", ":", "tiles", ".", "append", "(", "Tile", ".", "from_tms", "(", "tms_x", "=", "x", ",", "tms_y", "=", "y", ",", "zoom", "=", "zoom_level", ")", ")", "return", "tiles" ]
python
* Returns all tiles for the specified bounding box
false
2,502,832
def mechs(self):
    """
    The set of mechanisms supported by the credential.

    :type: :class:`~gssapi.oids.OIDSet`
    """
    if not self._mechs:
        self._mechs = self._inquire(False, False, False, True)[3]
    return self._mechs
[ "def", "mechs", "(", "self", ")", ":", "if", "not", "self", ".", "_mechs", ":", "self", ".", "_mechs", "=", "self", ".", "_inquire", "(", "False", ",", "False", ",", "False", ",", "True", ")", "[", "3", "]", "return", "self", ".", "_mechs" ]
python
The set of mechanisms supported by the credential. :type: :class:`~gssapi.oids.OIDSet`
false
2,197,520
def _check_valid_data(self, data):
    """Checks that the given data is a float array with four channels.

    Parameters
    ----------
    data : :obj:`numpy.ndarray`
        The data to check.

    Raises
    ------
    ValueError
        If the data is invalid.
    """
    if data.dtype.type is not np.float32 and \
            data.dtype.type is not np.float64:
        raise ValueError(
            'Illegal data type. RGB-D images only support float arrays')

    # `or` here, not `and`: the data is invalid if it is not 3-dimensional
    # or does not have exactly four channels.
    if len(data.shape) != 3 or data.shape[2] != 4:
        raise ValueError(
            'Illegal data type. RGB-D images only support four channel')

    color_data = data[:, :, :3]
    if np.any((color_data < 0) | (color_data > BINARY_IM_MAX_VAL)):
        raise ValueError(
            'Color channels must be in the range (0, BINARY_IM_MAX_VAL)')
[ "def", "_check_valid_data", "(", "self", ",", "data", ")", ":", "if", "data", ".", "dtype", ".", "type", "is", "not", "np", ".", "float32", "and", "data", ".", "dtype", ".", "type", "is", "not", "np", ".", "float64", ":", "raise", "ValueError", "(", "'Illegal data type. RGB-D images only support float arrays'", ")", "if", "len", "(", "data", ".", "shape", ")", "!=", "3", "and", "data", ".", "shape", "[", "2", "]", "!=", "4", ":", "raise", "ValueError", "(", "'Illegal data type. RGB-D images only support four channel'", ")", "color_data", "=", "data", "[", ":", ",", ":", ",", ":", "3", "]", "if", "np", ".", "any", "(", "(", "color_data", "<", "0", ")", "|", "(", "color_data", ">", "BINARY_IM_MAX_VAL", ")", ")", ":", "raise", "ValueError", "(", "'Color channels must be in the range (0, BINARY_IM_MAX_VAL)'", ")" ]
python
Checks that the given data is a float array with four channels. Parameters ---------- data : :obj:`numpy.ndarray` The data to check. Raises ------ ValueError If the data is invalid.
false
1,939,830
def meval(self, f, *args, **kw):
    """Evaluates the given function call for all models

    Returns the results of the calls in a list
    """
    # !! PART OF ORIGINAL DOCSTRING INCOMPATIBLE WITH CLASS INTERFACE !!
    # Example
    # -------
    # We set up multiple stationary models, one for a reference (ground)
    # state, and two for biased states, and group them in a
    # MultiStationaryModel.
    # >>> from pyemma.thermo import StationaryModel, MEMM
    # >>> m_1 = StationaryModel(f=[1.0, 0], label='biased 1')
    # >>> m_2 = StationaryModel(f=[2.0, 0], label='biased 2')
    # >>> m_mult = MEMM([m_1, m_2], [0, 0], label='unbiased')
    # Compute the stationary distribution for the two biased models
    # >>> m_mult.meval('stationary_distribution')
    # [array([ 0.73105858, 0.26894142]), array([ 0.88079708, 0.11920292])]
    # We set up multiple Markov state models for different temperatures
    # and group them in a MultiStationaryModel.
    # >>> import numpy as np
    # >>> from pyemma.msm import MSM
    # >>> from pyemma.thermo import MEMM
    # >>> b = 20  # transition barrier in kJ / mol
    # >>> temps = np.arange(300, 500, 25)  # temperatures 300 to 500 K
    # >>> p_trans = [np.exp(- b / kT) for kT in 0.00831*temps ]
    # >>> # build MSMs for different temperatures
    # >>> msms = [MSM(P=np.array([[1.0-p, p], [p, 1.0-p]])) for p in p_trans]
    # >>> # build Multi-MSM
    # >>> msm_mult = MEMM(pi=msms[0].stationary_distribution, label='300 K', models=msms)
    # Compute the timescales and see how they decay with temperature
    # Greetings to Arrhenius.
    # >>> np.hstack(msm_mult.meval('timescales'))
    # array([ 1523.83827932, 821.88040004, 484.06386176, 305.87880068,
    #         204.64109413, 143.49286817, 104.62539128, 78.83331598])
    # !! END OF INCOMPATIBLE PART !!
    return [_call_member(M, f, *args, **kw) for M in self.models]
[ "def", "meval", "(", "self", ",", "f", ",", "*", "args", ",", "**", "kw", ")", ":", "return", "[", "_call_member", "(", "M", ",", "f", ",", "*", "args", ",", "**", "kw", ")", "for", "M", "in", "self", ".", "models", "]" ]
python
Evaluates the given function call for all models Returns the results of the calls in a list
false
1,832,444
def putmulti(self, kvpairs, dupdata=False, append=False, db=_DefaultDB):
    '''
    Returns:
        Tuple of number of items consumed, number of items added
    '''
    if self.readonly:
        raise s_exc.IsReadOnly()

    # Log playback isn't compatible with generators
    if not isinstance(kvpairs, list):
        kvpairs = list(kvpairs)

    try:
        self.dirty = True

        if not self.recovering:
            self._logXactOper(self.putmulti, kvpairs, dupdata=dupdata, append=append, db=db)

        with self.xact.cursor(db=db.db) as curs:
            return curs.putmulti(kvpairs, dupdata=dupdata, append=append)
    except lmdb.MapFullError:
        return self._handle_mapfull()
[ "def", "putmulti", "(", "self", ",", "kvpairs", ",", "dupdata", "=", "False", ",", "append", "=", "False", ",", "db", "=", "_DefaultDB", ")", ":", "if", "self", ".", "readonly", ":", "raise", "s_exc", ".", "IsReadOnly", "(", ")", "if", "not", "isinstance", "(", "kvpairs", ",", "list", ")", ":", "kvpairs", "=", "list", "(", "kvpairs", ")", "try", ":", "self", ".", "dirty", "=", "True", "if", "not", "self", ".", "recovering", ":", "self", ".", "_logXactOper", "(", "self", ".", "putmulti", ",", "kvpairs", ",", "dupdata", "=", "dupdata", ",", "append", "=", "append", ",", "db", "=", "db", ")", "with", "self", ".", "xact", ".", "cursor", "(", "db", "=", "db", ".", "db", ")", "as", "curs", ":", "return", "curs", ".", "putmulti", "(", "kvpairs", ",", "dupdata", "=", "dupdata", ",", "append", "=", "append", ")", "except", "lmdb", ".", "MapFullError", ":", "return", "self", ".", "_handle_mapfull", "(", ")" ]
python
Returns: Tuple of number of items consumed, number of items added
false
2,524,109
def detect_keep_boundary(start, end, namespaces):
    """a helper to inspect a link and see if we should keep the link boundary
    """
    result_start, result_end = False, False
    parent_start = start.getparent()
    parent_end = end.getparent()
    if parent_start.tag == "{%s}p" % namespaces['text']:
        # more than one child in the containing paragraph ?
        # we keep the boundary
        result_start = len(parent_start.getchildren()) > 1
    if parent_end.tag == "{%s}p" % namespaces['text']:
        # more than one child in the containing paragraph ?
        # we keep the boundary
        result_end = len(parent_end.getchildren()) > 1
    return result_start, result_end
[ "def", "detect_keep_boundary", "(", "start", ",", "end", ",", "namespaces", ")", ":", "result_start", ",", "result_end", "=", "False", ",", "False", "parent_start", "=", "start", ".", "getparent", "(", ")", "parent_end", "=", "end", ".", "getparent", "(", ")", "if", "parent_start", ".", "tag", "==", "\"{%s}p\"", "%", "namespaces", "[", "'text'", "]", ":", "result_start", "=", "len", "(", "parent_start", ".", "getchildren", "(", ")", ")", ">", "1", "if", "parent_end", ".", "tag", "==", "\"{%s}p\"", "%", "namespaces", "[", "'text'", "]", ":", "result_end", "=", "len", "(", "parent_end", ".", "getchildren", "(", ")", ")", ">", "1", "return", "result_start", ",", "result_end" ]
python
a helper to inspect a link and see if we should keep the link boundary
false
1,987,125
def get_suggestions(self, prefix, fuzzy=False, num=10, with_scores=False, with_payloads=False):
    """
    Get a list of suggestions from the AutoCompleter, for a given prefix

    ### Parameters:
    - **prefix**: the prefix we are searching. **Must be valid ascii or utf-8**
    - **fuzzy**: If set to true, the prefix search is done in fuzzy mode.
        **NOTE**: Running fuzzy searches on short (<3 letters) prefixes
        can be very slow, and even scan the entire index.
    - **with_scores**: if set to true, we also return the (refactored) score
        of each suggestion. This is normally not needed, and is NOT the
        original score inserted into the index
    - **with_payloads**: Return suggestion payloads
    - **num**: The maximum number of results we return. Note that we might
        return less. The algorithm trims irrelevant suggestions.

    Returns a list of Suggestion objects. If with_scores was False, the
    score of all suggestions is 1.
    """
    args = [AutoCompleter.SUGGET_COMMAND, self.key, prefix, 'MAX', num]
    if fuzzy:
        args.append(AutoCompleter.FUZZY)
    if with_scores:
        args.append(AutoCompleter.WITHSCORES)
    if with_payloads:
        args.append(AutoCompleter.WITHPAYLOADS)

    ret = self.redis.execute_command(*args)
    results = []
    if not ret:
        return results

    parser = SuggestionParser(with_scores, with_payloads, ret)
    return [s for s in parser]
[ "def", "get_suggestions", "(", "self", ",", "prefix", ",", "fuzzy", "=", "False", ",", "num", "=", "10", ",", "with_scores", "=", "False", ",", "with_payloads", "=", "False", ")", ":", "args", "=", "[", "AutoCompleter", ".", "SUGGET_COMMAND", ",", "self", ".", "key", ",", "prefix", ",", "'MAX'", ",", "num", "]", "if", "fuzzy", ":", "args", ".", "append", "(", "AutoCompleter", ".", "FUZZY", ")", "if", "with_scores", ":", "args", ".", "append", "(", "AutoCompleter", ".", "WITHSCORES", ")", "if", "with_payloads", ":", "args", ".", "append", "(", "AutoCompleter", ".", "WITHPAYLOADS", ")", "ret", "=", "self", ".", "redis", ".", "execute_command", "(", "*", "args", ")", "results", "=", "[", "]", "if", "not", "ret", ":", "return", "results", "parser", "=", "SuggestionParser", "(", "with_scores", ",", "with_payloads", ",", "ret", ")", "return", "[", "s", "for", "s", "in", "parser", "]" ]
python
Get a list of suggestions from the AutoCompleter, for a given prefix ### Parameters: - **prefix**: the prefix we are searching. **Must be valid ascii or utf-8** - **fuzzy**: If set to true, the prefix search is done in fuzzy mode. **NOTE**: Running fuzzy searches on short (<3 letters) prefixes can be very slow, and even scan the entire index. - **with_scores**: if set to true, we also return the (refactored) score of each suggestion. This is normally not needed, and is NOT the original score inserted into the index - **with_payloads**: Return suggestion payloads - **num**: The maximum number of results we return. Note that we might return less. The algorithm trims irrelevant suggestions. Returns a list of Suggestion objects. If with_scores was False, the score of all suggestions is 1.
false
2,203,996
def function(self, x, y, amp, beta, n, m, complex_bool, center_x, center_y):
    """
    :param x: x-coordinate, numpy array
    :param y: y-coordinate, numpy array
    :param amp: amplitude normalization
    :param beta: shapelet scale
    :param n: order of polynomial
    :param m: rotational invariance
    :param center_x: center of shapelet
    :param center_y: center of shapelet
    :return: amplitude of shapelet at position (x, y)
    """
    r, phi = param_util.cart2polar(x, y, center=np.array([center_x, center_y]))
    if complex_bool is True:
        return amp * self._chi_n_m(r, beta, n, m) * np.exp(-1j * m * phi).imag
    else:
        return amp * self._chi_n_m(r, beta, n, m) * np.exp(-1j * m * phi).real
[ "def", "function", "(", "self", ",", "x", ",", "y", ",", "amp", ",", "beta", ",", "n", ",", "m", ",", "complex_bool", ",", "center_x", ",", "center_y", ")", ":", "r", ",", "phi", "=", "param_util", ".", "cart2polar", "(", "x", ",", "y", ",", "center", "=", "np", ".", "array", "(", "[", "center_x", ",", "center_y", "]", ")", ")", "if", "complex_bool", "is", "True", ":", "return", "amp", "*", "self", ".", "_chi_n_m", "(", "r", ",", "beta", ",", "n", ",", "m", ")", "*", "np", ".", "exp", "(", "-", "1j", "*", "m", "*", "phi", ")", ".", "imag", "else", ":", "return", "amp", "*", "self", ".", "_chi_n_m", "(", "r", ",", "beta", ",", "n", ",", "m", ")", "*", "np", ".", "exp", "(", "-", "1j", "*", "m", "*", "phi", ")", ".", "real" ]
python
:param x: x-coordinate, numpy array :param y: y-coordinate, numpy array :param amp: amplitude normalization :param beta: shapelet scale :param n: order of polynomial :param m: rotational invariance :param center_x: center of shapelet :param center_y: center of shapelet :return: amplitude of shapelet at position (x, y)
false
2,482,856
def replace_refund_transaction_by_id(cls, refund_transaction_id, refund_transaction, **kwargs):
    """Replace RefundTransaction

    Replace all attributes of RefundTransaction
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.replace_refund_transaction_by_id(refund_transaction_id, refund_transaction, async=True)
    >>> result = thread.get()

    :param async bool
    :param str refund_transaction_id: ID of refundTransaction to replace (required)
    :param RefundTransaction refund_transaction: Attributes of refundTransaction to replace (required)
    :return: RefundTransaction
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._replace_refund_transaction_by_id_with_http_info(refund_transaction_id, refund_transaction, **kwargs)
    else:
        (data) = cls._replace_refund_transaction_by_id_with_http_info(refund_transaction_id, refund_transaction, **kwargs)
        return data
[ "def", "replace_refund_transaction_by_id", "(", "cls", ",", "refund_transaction_id", ",", "refund_transaction", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_replace_refund_transaction_by_id_with_http_info", "(", "refund_transaction_id", ",", "refund_transaction", ",", "**", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_replace_refund_transaction_by_id_with_http_info", "(", "refund_transaction_id", ",", "refund_transaction", ",", "**", "kwargs", ")", "return", "data" ]
python
Replace RefundTransaction Replace all attributes of RefundTransaction This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.replace_refund_transaction_by_id(refund_transaction_id, refund_transaction, async=True) >>> result = thread.get() :param async bool :param str refund_transaction_id: ID of refundTransaction to replace (required) :param RefundTransaction refund_transaction: Attributes of refundTransaction to replace (required) :return: RefundTransaction If the method is called asynchronously, returns the request thread.
false
2,471,726
def RenameInstance(r, instance, new_name, ip_check, name_check=None):
    """
    Changes the name of an instance.

    @type instance: string
    @param instance: Instance name
    @type new_name: string
    @param new_name: New instance name
    @type ip_check: bool
    @param ip_check: Whether to ensure instance's IP address is inactive
    @type name_check: bool
    @param name_check: Whether to ensure instance's name is resolvable
    """
    body = {
        "ip_check": ip_check,
        "new_name": new_name,
    }

    if name_check is not None:
        body["name_check"] = name_check

    return r.request("put", "/2/instances/%s/rename" % instance, content=body)
[ "def", "RenameInstance", "(", "r", ",", "instance", ",", "new_name", ",", "ip_check", ",", "name_check", "=", "None", ")", ":", "body", "=", "{", "\"ip_check\"", ":", "ip_check", ",", "\"new_name\"", ":", "new_name", ",", "}", "if", "name_check", "is", "not", "None", ":", "body", "[", "\"name_check\"", "]", "=", "name_check", "return", "r", ".", "request", "(", "\"put\"", ",", "\"/2/instances/%s/rename\"", "%", "instance", ",", "content", "=", "body", ")" ]
python
Changes the name of an instance. @type instance: string @param instance: Instance name @type new_name: string @param new_name: New instance name @type ip_check: bool @param ip_check: Whether to ensure instance's IP address is inactive @type name_check: bool @param name_check: Whether to ensure instance's name is resolvable
false
2,546,679
def edit_config_input_with_inactive(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    edit_config = ET.Element("edit_config")
    config = edit_config
    input = ET.SubElement(edit_config, "input")
    with_inactive = ET.SubElement(input, "with-inactive", xmlns="http://tail-f.com/ns/netconf/inactive/1.0")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "edit_config_input_with_inactive", "(", "self", ",", "**", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "edit_config", "=", "ET", ".", "Element", "(", "\"edit_config\"", ")", "config", "=", "edit_config", "input", "=", "ET", ".", "SubElement", "(", "edit_config", ",", "\"input\"", ")", "with_inactive", "=", "ET", ".", "SubElement", "(", "input", ",", "\"with-inactive\"", ",", "xmlns", "=", "\"http://tail-f.com/ns/netconf/inactive/1.0\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
python
Auto Generated Code
false
1,780,239
def viz_live_trace(view):
    """
    Given a Manticore trace file, highlight the basic blocks.
    """
    tv = TraceVisualizer(view, None, live=True)
    if tv.workspace is None:
        tv.workspace = get_workspace()  # update due to singleton in case we are called after a clear
    tv.live_update = True
    tv.visualize()
[ "def", "viz_live_trace", "(", "view", ")", ":", "tv", "=", "TraceVisualizer", "(", "view", ",", "None", ",", "live", "=", "True", ")", "if", "tv", ".", "workspace", "is", "None", ":", "tv", ".", "workspace", "=", "get_workspace", "(", ")", "tv", ".", "live_update", "=", "True", "tv", ".", "visualize", "(", ")" ]
python
Given a Manticore trace file, highlight the basic blocks.
false
1,993,110
def parse_type_str(expected_base=None, with_arrlist=False):
    """
    Used by BaseCoder subclasses as a convenience for implementing the
    ``from_type_str`` method required by ``ABIRegistry``. Useful if
    normalizing then parsing a type string with an (optional) expected
    base is required in that method.
    """
    def decorator(old_from_type_str):
        @functools.wraps(old_from_type_str)
        def new_from_type_str(cls, type_str, registry):
            normalized_type_str = normalize(type_str)
            abi_type = parse(normalized_type_str)

            type_str_repr = repr(type_str)
            if type_str != normalized_type_str:
                type_str_repr = '{} (normalized to {})'.format(
                    type_str_repr,
                    repr(normalized_type_str),
                )

            if expected_base is not None:
                if not isinstance(abi_type, BasicType):
                    raise ValueError(
                        'Cannot create {} for non-basic type {}'.format(
                            cls.__name__,
                            type_str_repr,
                        )
                    )
                if abi_type.base != expected_base:
                    raise ValueError(
                        'Cannot create {} for type {}: expected type with '
                        "base '{}'".format(
                            cls.__name__,
                            type_str_repr,
                            expected_base,
                        )
                    )

            if not with_arrlist and abi_type.arrlist is not None:
                raise ValueError(
                    'Cannot create {} for type {}: expected type with '
                    'no array dimension list'.format(
                        cls.__name__,
                        type_str_repr,
                    )
                )
            if with_arrlist and abi_type.arrlist is None:
                raise ValueError(
                    'Cannot create {} for type {}: expected type with '
                    'array dimension list'.format(
                        cls.__name__,
                        type_str_repr,
                    )
                )

            # Perform general validation of default solidity types
            abi_type.validate()

            return old_from_type_str(cls, abi_type, registry)

        return classmethod(new_from_type_str)

    return decorator
[ "def", "parse_type_str", "(", "expected_base", "=", "None", ",", "with_arrlist", "=", "False", ")", ":", "def", "decorator", "(", "old_from_type_str", ")", ":", "@", "functools", ".", "wraps", "(", "old_from_type_str", ")", "def", "new_from_type_str", "(", "cls", ",", "type_str", ",", "registry", ")", ":", "normalized_type_str", "=", "normalize", "(", "type_str", ")", "abi_type", "=", "parse", "(", "normalized_type_str", ")", "type_str_repr", "=", "repr", "(", "type_str", ")", "if", "type_str", "!=", "normalized_type_str", ":", "type_str_repr", "=", "'{} (normalized to {})'", ".", "format", "(", "type_str_repr", ",", "repr", "(", "normalized_type_str", ")", ",", ")", "if", "expected_base", "is", "not", "None", ":", "if", "not", "isinstance", "(", "abi_type", ",", "BasicType", ")", ":", "raise", "ValueError", "(", "'Cannot create {} for non-basic type {}'", ".", "format", "(", "cls", ".", "__name__", ",", "type_str_repr", ",", ")", ")", "if", "abi_type", ".", "base", "!=", "expected_base", ":", "raise", "ValueError", "(", "'Cannot create {} for type {}: expected type with '", "\"base '{}'\"", ".", "format", "(", "cls", ".", "__name__", ",", "type_str_repr", ",", "expected_base", ",", ")", ")", "if", "not", "with_arrlist", "and", "abi_type", ".", "arrlist", "is", "not", "None", ":", "raise", "ValueError", "(", "'Cannot create {} for type {}: expected type with '", "'no array dimension list'", ".", "format", "(", "cls", ".", "__name__", ",", "type_str_repr", ",", ")", ")", "if", "with_arrlist", "and", "abi_type", ".", "arrlist", "is", "None", ":", "raise", "ValueError", "(", "'Cannot create {} for type {}: expected type with '", "'array dimension list'", ".", "format", "(", "cls", ".", "__name__", ",", "type_str_repr", ",", ")", ")", "abi_type", ".", "validate", "(", ")", "return", "old_from_type_str", "(", "cls", ",", "abi_type", ",", "registry", ")", "return", "classmethod", "(", "new_from_type_str", ")", "return", "decorator" ]
python
Used by BaseCoder subclasses as a convenience for implementing the ``from_type_str`` method required by ``ABIRegistry``. Useful if normalizing then parsing a type string with an (optional) expected base is required in that method.
false
2,163,277
def get_idiomatic_name_in_language(cls, name, language):
    """ Get the name for the given language

        Args:
            name (str): the name to convert
            language (str): the language to use

        Returns:
            a name in the given language

        Example:
            get_idiomatic_name_in_language("EnterpriseNetwork", "python")
            >>> enterprise_network
    """
    if language in cls.idiomatic_methods_cache:
        m = cls.idiomatic_methods_cache[language]
        if not m:
            return name
        return m(name)

    found, method = load_language_plugins(language, 'get_idiomatic_name')

    if found:
        cls.idiomatic_methods_cache[language] = method
        if method:
            return method(name)
        else:
            return name

    module = importlib.import_module('.lang.%s' % language, package="monolithe.generators")

    if not hasattr(module, 'get_idiomatic_name'):
        cls.idiomatic_methods_cache[language] = None
        return name

    method = getattr(module, 'get_idiomatic_name')
    cls.idiomatic_methods_cache[language] = method
    return method(name)
[ "def", "get_idiomatic_name_in_language", "(", "cls", ",", "name", ",", "language", ")", ":", "if", "language", "in", "cls", ".", "idiomatic_methods_cache", ":", "m", "=", "cls", ".", "idiomatic_methods_cache", "[", "language", "]", "if", "not", "m", ":", "return", "name", "return", "m", "(", "name", ")", "found", ",", "method", "=", "load_language_plugins", "(", "language", ",", "'get_idiomatic_name'", ")", "if", "found", ":", "cls", ".", "idiomatic_methods_cache", "[", "language", "]", "=", "method", "if", "method", ":", "return", "method", "(", "name", ")", "else", ":", "return", "name", "module", "=", "importlib", ".", "import_module", "(", "'.lang.%s'", "%", "language", ",", "package", "=", "\"monolithe.generators\"", ")", "if", "not", "hasattr", "(", "module", ",", "'get_idiomatic_name'", ")", ":", "cls", ".", "idiomatic_methods_cache", "[", "language", "]", "=", "None", "return", "name", "method", "=", "getattr", "(", "module", ",", "'get_idiomatic_name'", ")", "cls", ".", "idiomatic_methods_cache", "[", "language", "]", "=", "method", "return", "method", "(", "name", ")" ]
python
Get the name for the given language Args: name (str): the name to convert language (str): the language to use Returns: a name in the given language Example: get_idiomatic_name_in_language("EnterpriseNetwork", "python") >>> enterprise_network
false
2,389,428
def flush_cache(self):
    '''
    Use a cache to save state changes to avoid opening a session for every change.
    The cache will be flushed at the end of the simulation, and when history is accessed.
    '''
    logger.debug('Flushing cache {}'.format(self.db_path))
    with self.db:
        for rec in self._tups:
            self.db.execute("replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)",
                            (rec.agent_id, rec.t_step, rec.key, rec.value))
    self._tups = list()
[ "def", "flush_cache", "(", "self", ")", ":", "logger", ".", "debug", "(", "'Flushing cache {}'", ".", "format", "(", "self", ".", "db_path", ")", ")", "with", "self", ".", "db", ":", "for", "rec", "in", "self", ".", "_tups", ":", "self", ".", "db", ".", "execute", "(", "\"replace into history(agent_id, t_step, key, value) values (?, ?, ?, ?)\"", ",", "(", "rec", ".", "agent_id", ",", "rec", ".", "t_step", ",", "rec", ".", "key", ",", "rec", ".", "value", ")", ")", "self", ".", "_tups", "=", "list", "(", ")" ]
python
Use a cache to save state changes to avoid opening a session for every change. The cache will be flushed at the end of the simulation, and when history is accessed.
false
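flush_cache above leans on SQLite's REPLACE INTO, which only upserts when the table has a uniqueness constraint. A standalone sketch of that behavior — the primary key below is an assumption, since the record does not show the history table's schema:

import sqlite3

db = sqlite3.connect(":memory:")
# Assumed schema: without this primary key, REPLACE INTO would behave
# like a plain INSERT and duplicate rows would accumulate.
db.execute("CREATE TABLE history(agent_id, t_step, key, value, "
           "PRIMARY KEY (agent_id, t_step, key))")
with db:
    db.execute("REPLACE INTO history(agent_id, t_step, key, value) VALUES (?, ?, ?, ?)",
               (1, 0, "state", "idle"))
    db.execute("REPLACE INTO history(agent_id, t_step, key, value) VALUES (?, ?, ?, ?)",
               (1, 0, "state", "busy"))
print(db.execute("SELECT * FROM history").fetchall())  # [(1, 0, 'state', 'busy')]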
2,022,745
def discardi(self, begin, end, data=None):
    """
    Shortcut for discard(Interval(begin, end, data)).

    Completes in O(log n) time.
    """
    return self.discard(Interval(begin, end, data))
[ "def", "discardi", "(", "self", ",", "begin", ",", "end", ",", "data", "=", "None", ")", ":", "return", "self", ".", "discard", "(", "Interval", "(", "begin", ",", "end", ",", "data", ")", ")" ]
python
Shortcut for discard(Interval(begin, end, data)). Completes in O(log n) time.
false
1,668,312
def search_dict(data, key):
    """
    Search for a key in a nested dict, or list of nested dicts, and return the values.

    :param data: dict/list to search
    :param key: key to find
    :return: matches for key
    """
    if isinstance(data, dict):
        for dkey, value in data.items():
            if dkey == key:
                yield value
            for result in search_dict(value, key):
                yield result
    elif isinstance(data, list):
        for value in data:
            for result in search_dict(value, key):
                yield result
[ "def", "search_dict", "(", "data", ",", "key", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "for", "dkey", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "dkey", "==", "key", ":", "yield", "value", "for", "result", "in", "search_dict", "(", "value", ",", "key", ")", ":", "yield", "result", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "for", "value", "in", "data", ":", "for", "result", "in", "search_dict", "(", "value", ",", "key", ")", ":", "yield", "result" ]
python
Search for a key in a nested dict, or list of nested dicts, and return the values. :param data: dict/list to search :param key: key to find :return: matches for key
false
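search_dict above is self-contained, so a quick check of its traversal order is easy to run. The sample data here is made up for illustration; paste it after the search_dict definition:

# Hypothetical nested data, not from the source project.
data = {
    "name": "root",
    "children": [
        {"name": "a", "meta": {"name": "a-meta"}},
        {"name": "b"},
    ],
}
print(list(search_dict(data, "name")))
# ['root', 'a', 'a-meta', 'b']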
2,060,002
def submitTemplate(id, data={}):
    """
    Submit an existing Template.

    Args:
        `id`: ID of the template to submit
        `data`: json data containing the input_vars

    Returns:
        Dictionary containing Command Object details.
    """
    conn = Qubole.agent()
    path = str(id) + "/run"
    return conn.post(Template.element_path(path), data)
[ "def", "submitTemplate", "(", "id", ",", "data", "=", "{", "}", ")", ":", "conn", "=", "Qubole", ".", "agent", "(", ")", "path", "=", "str", "(", "id", ")", "+", "\"/run\"", "return", "conn", ".", "post", "(", "Template", ".", "element_path", "(", "path", ")", ",", "data", ")" ]
python
Submit an existing Template. Args: `id`: ID of the template to submit `data`: json data containing the input_vars Returns: Dictionary containing Command Object details.
false
2,457,603
def cmd(send, msg, args):
    """Changes the output filter.

    Syntax: {command} [--channel channel] <filter|--show|--list|--reset|--chain filter,[filter2,...]>
    """
    if args['type'] == 'privmsg':
        send('Filters must be set in channels, not via private message.')
        return
    isadmin = args['is_admin'](args['nick'])
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--channel', nargs='?', default=args['target'])
    group = parser.add_mutually_exclusive_group()
    group.add_argument('filter', nargs='?')
    group.add_argument('--show', action='store_true')
    group.add_argument('--list', action='store_true')
    group.add_argument('--reset', '--clear', action='store_true')
    group.add_argument('--chain')
    if not msg:
        send(get_filters(args['handler'], args['target']))
        return
    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.list:
        send("Available filters are %s" % ", ".join(textutils.output_filters.keys()))
    elif cmdargs.reset and isadmin:
        args['handler'].outputfilter[cmdargs.channel].clear()
        send("Okay!")
    elif cmdargs.chain and isadmin:
        if not args['handler'].outputfilter[cmdargs.channel]:
            send("Must have a filter set in order to chain.")
            return
        filter_list, output = textutils.append_filters(cmdargs.chain)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    elif cmdargs.show:
        send(get_filters(args['handler'], cmdargs.channel))
    elif isadmin:
        # If we're just adding a filter without chain, blow away any existing filters.
        filter_list, output = textutils.append_filters(cmdargs.filter)
        if filter_list is not None:
            args['handler'].outputfilter[cmdargs.channel].clear()
            args['handler'].outputfilter[cmdargs.channel].extend(filter_list)
        send(output)
    else:
        send('This command requires admin privileges.')
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "if", "args", "[", "'type'", "]", "==", "'privmsg'", ":", "send", "(", "'Filters must be set in channels, not via private message.'", ")", "return", "isadmin", "=", "args", "[", "'is_admin'", "]", "(", "args", "[", "'nick'", "]", ")", "parser", "=", "arguments", ".", "ArgParser", "(", "args", "[", "'config'", "]", ")", "parser", ".", "add_argument", "(", "'--channel'", ",", "nargs", "=", "'?'", ",", "default", "=", "args", "[", "'target'", "]", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "group", ".", "add_argument", "(", "'filter'", ",", "nargs", "=", "'?'", ")", "group", ".", "add_argument", "(", "'--show'", ",", "action", "=", "'store_true'", ")", "group", ".", "add_argument", "(", "'--list'", ",", "action", "=", "'store_true'", ")", "group", ".", "add_argument", "(", "'--reset'", ",", "'--clear'", ",", "action", "=", "'store_true'", ")", "group", ".", "add_argument", "(", "'--chain'", ")", "if", "not", "msg", ":", "send", "(", "get_filters", "(", "args", "[", "'handler'", "]", ",", "args", "[", "'target'", "]", ")", ")", "return", "try", ":", "cmdargs", "=", "parser", ".", "parse_args", "(", "msg", ")", "except", "arguments", ".", "ArgumentException", "as", "e", ":", "send", "(", "str", "(", "e", ")", ")", "return", "if", "cmdargs", ".", "list", ":", "send", "(", "\"Available filters are %s\"", "%", "\", \"", ".", "join", "(", "textutils", ".", "output_filters", ".", "keys", "(", ")", ")", ")", "elif", "cmdargs", ".", "reset", "and", "isadmin", ":", "args", "[", "'handler'", "]", ".", "outputfilter", "[", "cmdargs", ".", "channel", "]", ".", "clear", "(", ")", "send", "(", "\"Okay!\"", ")", "elif", "cmdargs", ".", "chain", "and", "isadmin", ":", "if", "not", "args", "[", "'handler'", "]", ".", "outputfilter", "[", "cmdargs", ".", "channel", "]", ":", "send", "(", "\"Must have a filter set in order to chain.\"", ")", "return", "filter_list", ",", "output", "=", "textutils", ".", "append_filters", "(", "cmdargs", ".", "chain", ")", "if", "filter_list", "is", "not", "None", ":", "args", "[", "'handler'", "]", ".", "outputfilter", "[", "cmdargs", ".", "channel", "]", ".", "extend", "(", "filter_list", ")", "send", "(", "output", ")", "elif", "cmdargs", ".", "show", ":", "send", "(", "get_filters", "(", "args", "[", "'handler'", "]", ",", "cmdargs", ".", "channel", ")", ")", "elif", "isadmin", ":", "filter_list", ",", "output", "=", "textutils", ".", "append_filters", "(", "cmdargs", ".", "filter", ")", "if", "filter_list", "is", "not", "None", ":", "args", "[", "'handler'", "]", ".", "outputfilter", "[", "cmdargs", ".", "channel", "]", ".", "clear", "(", ")", "args", "[", "'handler'", "]", ".", "outputfilter", "[", "cmdargs", ".", "channel", "]", ".", "extend", "(", "filter_list", ")", "send", "(", "output", ")", "else", ":", "send", "(", "'This command requires admin privileges.'", ")" ]
python
Changes the output filter. Syntax: {command} [--channel channel] <filter|--show|--list|--reset|--chain filter,[filter2,...]>
false
2,265,263
def __create_coordinates(self, lat, lon, elev):
    """
    GeoJSON standard:
    Use to determine 2-point or 4-point coordinates

    :param list lat:
    :param list lon:
    :return dict:
    """
    # Sort lat and lon in numerical order
    lat.sort()
    lon.sort()
    geo_dict = {}

    # 4 coordinate values
    if len(lat) == 2 and len(lon) == 2:
        # Matching coordinate pairs. Not 4 unique values.
        if lat[0] == lat[1] and lon[0] == lon[1]:
            logger_noaa_lpd.info("coordinates found: {}".format("2"))
            lat.pop()
            lon.pop()
            geo_dict = self.__geo_point(lat, lon, elev)
        # 4 unique coordinates
        else:
            logger_noaa_lpd.info("coordinates found: {}".format("4"))
            geo_dict = self.__geo_multipoint(lat, lon, elev)

    # 2 coordinate values
    elif len(lat) == 1 and len(lon) == 1:
        logger_noaa_lpd.info("coordinates found: {}".format("2"))
        geo_dict = self.__geo_point(lat, lon, elev)

    # 0 coordinate values
    elif not lat and not lon:
        logger_noaa_lpd.info("coordinates found: {}".format("0"))

    else:
        geo_dict = {}
        logger_noaa_lpd.info("coordinates found: {}".format("too many"))

    return geo_dict
[ "def", "__create_coordinates", "(", "self", ",", "lat", ",", "lon", ",", "elev", ")", ":", "lat", ".", "sort", "(", ")", "lon", ".", "sort", "(", ")", "geo_dict", "=", "{", "}", "if", "len", "(", "lat", ")", "==", "2", "and", "len", "(", "lon", ")", "==", "2", ":", "if", "lat", "[", "0", "]", "==", "lat", "[", "1", "]", "and", "lon", "[", "0", "]", "==", "lon", "[", "1", "]", ":", "logger_noaa_lpd", ".", "info", "(", "\"coordinates found: {}\"", ".", "format", "(", "\"2\"", ")", ")", "lat", ".", "pop", "(", ")", "lon", ".", "pop", "(", ")", "geo_dict", "=", "self", ".", "__geo_point", "(", "lat", ",", "lon", ",", "elev", ")", "else", ":", "logger_noaa_lpd", ".", "info", "(", "\"coordinates found: {}\"", ".", "format", "(", "\"4\"", ")", ")", "geo_dict", "=", "self", ".", "__geo_multipoint", "(", "lat", ",", "lon", ",", "elev", ")", "elif", "len", "(", "lat", ")", "==", "1", "and", "len", "(", "lon", ")", "==", "1", ":", "logger_noaa_lpd", ".", "info", "(", "\"coordinates found: {}\"", ".", "format", "(", "\"2\"", ")", ")", "geo_dict", "=", "self", ".", "__geo_point", "(", "lat", ",", "lon", ",", "elev", ")", "elif", "not", "lat", "and", "not", "lon", ":", "logger_noaa_lpd", ".", "info", "(", "\"coordinates found: {}\"", ".", "format", "(", "\"0\"", ")", ")", "else", ":", "geo_dict", "=", "{", "}", "logger_noaa_lpd", ".", "info", "(", "\"coordinates found: {}\"", ".", "format", "(", "\"too many\"", ")", ")", "return", "geo_dict" ]
python
GeoJSON standard: Use to determine 2-point or 4-point coordinates :param list lat: :param list lon: :return dict:
false
1,928,185
def mecab_tokenize(text, lang):
    """
    Use the mecab-python3 package to tokenize the given text. The `lang`
    must be 'ja' for Japanese or 'ko' for Korean.

    The simplest output from mecab-python3 is the single-string form, which
    contains the same table that the command-line version of MeCab would
    output. We find the tokens in the first column of this table.
    """
    if lang not in MECAB_DICTIONARY_NAMES:
        raise ValueError("Can't run MeCab on language %r" % lang)
    if lang not in MECAB_ANALYZERS:
        MECAB_ANALYZERS[lang] = make_mecab_analyzer(MECAB_DICTIONARY_NAMES[lang])
    analyzer = MECAB_ANALYZERS[lang]

    text = unicodedata.normalize('NFKC', text.strip())
    analyzed = analyzer.parse(text)
    if not analyzed:
        return []
    return [line.split('\t')[0]
            for line in analyzed.split('\n')
            if line != '' and line != 'EOS']
[ "def", "mecab_tokenize", "(", "text", ",", "lang", ")", ":", "if", "lang", "not", "in", "MECAB_DICTIONARY_NAMES", ":", "raise", "ValueError", "(", "\"Can't run MeCab on language %r\"", "%", "lang", ")", "if", "lang", "not", "in", "MECAB_ANALYZERS", ":", "MECAB_ANALYZERS", "[", "lang", "]", "=", "make_mecab_analyzer", "(", "MECAB_DICTIONARY_NAMES", "[", "lang", "]", ")", "analyzer", "=", "MECAB_ANALYZERS", "[", "lang", "]", "text", "=", "unicodedata", ".", "normalize", "(", "'NFKC'", ",", "text", ".", "strip", "(", ")", ")", "analyzed", "=", "analyzer", ".", "parse", "(", "text", ")", "if", "not", "analyzed", ":", "return", "[", "]", "return", "[", "line", ".", "split", "(", "'\\t'", ")", "[", "0", "]", "for", "line", "in", "analyzed", ".", "split", "(", "'\\n'", ")", "if", "line", "!=", "''", "and", "line", "!=", "'EOS'", "]" ]
python
Use the mecab-python3 package to tokenize the given text. The `lang` must be 'ja' for Japanese or 'ko' for Korean. The simplest output from mecab-python3 is the single-string form, which contains the same table that the command-line version of MeCab would output. We find the tokens in the first column of this table.
false
1,585,180
def call(self, inputs):
    """Call `Layer`."""
    # if context.executing_eagerly():
    #     if not self.initialized:
    #         self._data_dep_init(inputs)
    self._compute_weights()  # Recompute weights for each forward pass
    output = self.layer.call(inputs)
    return output
[ "def", "call", "(", "self", ",", "inputs", ")", ":", "self", ".", "_compute_weights", "(", ")", "output", "=", "self", ".", "layer", ".", "call", "(", "inputs", ")", "return", "output" ]
python
Call `Layer`.
false
2,504,667
def __init__(
        self, attrs=None, mode=None, theme=None, config=None,
        dependencies=(), js_var_format=None, addon_js=(), addon_css=(),
        custom_mode=None, custom_js=(), keymap=None, custom_css=None,
        **kwargs):
    u"""Constructor of CodeMirrorTextarea

    Attribute:
        path          - CodeMirror directory URI (DEFAULT = settings.CODEMIRROR_PATH)
        mode          - Name of language or a modal configuration object as described in CodeMirror docs.
                        Used to autoload an appropriate language plugin js file according to filename
                        conventions. (DEFAULT = settings.CODEMIRROR_MODE)
        theme         - Name of theme. Also autoloads theme plugin css according to filename conventions.
                        (DEFAULT = settings.CODEMIRROR_THEME)
        config        - The rest of the options passed into CodeMirror as a python map.
                        (updated from settings.CODEMIRROR_CONFIG)
        dependencies  - Some modes depend on others, you can pass extra modes dependencies with this argument.
                        For example for mode="htmlmixed", you must pass dependencies=("xml", "javascript", "css").
        js_var_format - A format string interpolated with the form field name to name a global JS variable that
                        will hold the CodeMirror editor object. For example with js_var_format="%s_editor" and
                        a field named "code", the JS variable name would be "code_editor". If None is passed,
                        no global variable is created (DEFAULT = settings.CODEMIRROR_JS_VAR_FORMAT)
        addon_js      - Various addons are available for CodeMirror. You can pass the names of any addons to
                        load with this argument. For example, for mode="django", you must pass
                        addon_js=("mode/overlay", ).
        addon_css     - Some addons require corresponding CSS files. Since not every addon requires a CSS file,
                        and the names of these files do not always follow a convention, they must be listed
                        separately. For example, addon_css=("display/fullscreen", ).
        custom_mode   - To use a custom mode (i.e. one not included in the standard CodeMirror distribution),
                        set this to the name, or configuration object, of the mode, and ensure "mode" is None.
                        For example, custom_mode="my_custom_mode".
        custom_js     - To include other Javascript files with this widget that are not defined in the
                        CodeMirror package, set this to a list of pathnames. If "custom_mode" is defined,
                        this will probably contain the path of the file defining that mode. Paths in this
                        list will not be prepended with settings.CODEMIRROR_PATH.
                        For example, custom_js=("site_js/my_custom_mode.js", )
        keymap        - The name of a keymap to use. Keymaps are located in settings.CODEMIRROR_PATH/keymap.
                        Default: None.
        custom_css    - To include other CSS files with this widget that are not defined in the CodeMirror
                        package, set this to a list of pathnames. Paths in this list will not be prepended
                        with any path. For example, custom_css=("site_css/my_styles.css", )

    Example:
        *-------------------------------*
        + static
          + codemirror
            + lib
              - codemirror.js
              - codemirror.css
            + mode
              + python
                - python.js
            + theme
              + cobalt.css
            + addon
              + display
                - fullscreen.js
                - fullscreen.css
          + site_js
            - my_custom_mode.js
        *-------------------------------*
        CODEMIRROR_PATH = "codemirror"

        codemirror = CodeMirrorTextarea(mode="python", theme="cobalt", config={ 'fixedGutter': True })
        document = forms.TextField(widget=codemirror)
    """
    super(CodeMirrorTextarea, self).__init__(attrs=attrs, **kwargs)

    mode = mode or custom_mode or CODEMIRROR_MODE
    if utils.isstring(mode):
        mode = {'name': mode}
    self.mode_name = mode['name']
    self.custom_mode = custom_mode
    self.dependencies = dependencies
    self.addon_js = addon_js
    self.addon_css = addon_css
    self.custom_js = custom_js
    self.custom_css = custom_css
    self.keymap = keymap
    self.js_var_format = js_var_format or CODEMIRROR_JS_VAR_FORMAT

    theme = theme or CODEMIRROR_THEME
    theme_css_filename = THEME_CSS_FILENAME_RE.search(theme).group(0)
    if theme_css_filename == 'default':
        self.theme_css = []
    else:
        self.theme_css = [theme_css_filename]

    config = config or {}
    self.option_json = utils.CodeMirrorJSONEncoder().encode(dict(chain(
        CODEMIRROR_CONFIG.items(),
        config.items(),
        [('mode', mode), ('theme', theme)])))
[ "def", "__init__", "(", "self", ",", "attrs", "=", "None", ",", "mode", "=", "None", ",", "theme", "=", "None", ",", "config", "=", "None", ",", "dependencies", "=", "(", ")", ",", "js_var_format", "=", "None", ",", "addon_js", "=", "(", ")", ",", "addon_css", "=", "(", ")", ",", "custom_mode", "=", "None", ",", "custom_js", "=", "(", ")", ",", "keymap", "=", "None", ",", "custom_css", "=", "None", ",", "**", "kwargs", ")", ":", "super", "(", "CodeMirrorTextarea", ",", "self", ")", ".", "__init__", "(", "attrs", "=", "attrs", ",", "**", "kwargs", ")", "mode", "=", "mode", "or", "custom_mode", "or", "CODEMIRROR_MODE", "if", "utils", ".", "isstring", "(", "mode", ")", ":", "mode", "=", "{", "'name'", ":", "mode", "}", "self", ".", "mode_name", "=", "mode", "[", "'name'", "]", "self", ".", "custom_mode", "=", "custom_mode", "self", ".", "dependencies", "=", "dependencies", "self", ".", "addon_js", "=", "addon_js", "self", ".", "addon_css", "=", "addon_css", "self", ".", "custom_js", "=", "custom_js", "self", ".", "custom_css", "=", "custom_css", "self", ".", "keymap", "=", "keymap", "self", ".", "js_var_format", "=", "js_var_format", "or", "CODEMIRROR_JS_VAR_FORMAT", "theme", "=", "theme", "or", "CODEMIRROR_THEME", "theme_css_filename", "=", "THEME_CSS_FILENAME_RE", ".", "search", "(", "theme", ")", ".", "group", "(", "0", ")", "if", "theme_css_filename", "==", "'default'", ":", "self", ".", "theme_css", "=", "[", "]", "else", ":", "self", ".", "theme_css", "=", "[", "theme_css_filename", "]", "config", "=", "config", "or", "{", "}", "self", ".", "option_json", "=", "utils", ".", "CodeMirrorJSONEncoder", "(", ")", ".", "encode", "(", "dict", "(", "chain", "(", "CODEMIRROR_CONFIG", ".", "items", "(", ")", ",", "config", ".", "items", "(", ")", ",", "[", "(", "'mode'", ",", "mode", ")", ",", "(", "'theme'", ",", "theme", ")", "]", ")", ")", ")" ]
python
u"""Constructor of CodeMirrorTextarea Attribute: path - CodeMirror directory URI (DEFAULT = settings.CODEMIRROR_PATH) mode - Name of language or a modal configuration object as described in CodeMirror docs. Used to autoload an appropriate language plugin js file according to filename conventions. (DEFAULT = settings.CODEMIRROR_MODE) theme - Name of theme. Also autoloads theme plugin css according to filename conventions. (DEFAULT = settings.CODEMIRROR_THEME) config - The rest of the options passed into CodeMirror as a python map. (updated from settings.CODEMIRROR_CONFIG) dependencies - Some modes depend on others, you can pass extra modes dependencies with this argument. For example for mode="htmlmixed", you must pass dependencies=("xml", "javascript", "css"). js_var_format - A format string interpolated with the form field name to name a global JS variable that will hold the CodeMirror editor object. For example with js_var_format="%s_editor" and a field named "code", the JS variable name would be "code_editor". If None is passed, no global variable is created (DEFAULT = settings.CODEMIRROR_JS_VAR_FORMAT) addon_js - Various addons are available for CodeMirror. You can pass the names of any addons to load with this argument. For example, for mode="django", you must pass addon_js=("mode/overlay", ). addon_css - Some addons require corresponding CSS files. Since not every addon requires a CSS file, and the names of these files do not always follow a convention, they must be listed separately. For example, addon_css=("display/fullscreen", ). custom_mode - To use a custom mode (i.e. one not included in the standard CodeMirror distribution), set this to the name, or configuration object, of the mode, and ensure "mode" is None. For example, custom_mode="my_custom_mode". custom_js - To include other Javascript files with this widget that are not defined in the CodeMirror package, set this to a list of pathnames. If "custom_mode" is defined, this will probably contain the path of the file defining that mode. Paths in this list will not be prepended with settings.CODEMIRROR_PATH. For example, custom_js=("site_js/my_custom_mode.js", ) keymap - The name of a keymap to use. Keymaps are located in settings.CODEMIRROR_PATH/keymap. Default: None. custom_css - To include other CSS files with this widget that are not defined in the CodeMirror package, set this to a list of pathnames. Paths in this list will not be prepended with any path. For example, custom_css=("site_css/my_styles.css", ) Example: *-------------------------------* + static + codemirror + lib - codemirror.js - codemirror.css + mode + python - python.js + theme + cobalt.css + addon + display - fullscreen.js - fullscreen.css + site_js - my_custom_mode.js *-------------------------------* CODEMIRROR_PATH = "codemirror" codemirror = CodeMirrorTextarea(mode="python", theme="cobalt", config={ 'fixedGutter': True }) document = forms.TextField(widget=codemirror)
false
2,634,733
def __init__(self, parent, *args, **kwargs):
    """
    Initializes the class.

    :param parent: Object parent.
    :type parent: QObject
    :param \*args: Arguments.
    :type \*args: \*
    :param \*\*kwargs: Keywords arguments.
    :type \*\*kwargs: \*\*
    """

    LOGGER.debug("> Initializing '{0}()' class.".format(self.__class__.__name__))

    QSortFilterProxyModel.__init__(self, parent, *args, **kwargs)

    # --- Setting class attributes. ---
    color = "rgb({0}, {1}, {2})"
    self.__editor_node_format = "<span>{0}</span>"
    self.__file_node_format = "<span style=\"color: {0};\">{{0}}</span>".format(color.format(160, 160, 160))
    self.__directory_node_format = "{0}"
    self.__project_node_format = "<b>{0}</b>"
    self.__default_project_node_format = "<b>Open Files</b>"
[ "def", "__init__", "(", "self", ",", "parent", ",", "*", "args", ",", "**", "kwargs", ")", ":", "LOGGER", ".", "debug", "(", "\"> Initializing '{0}()' class.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "QSortFilterProxyModel", ".", "__init__", "(", "self", ",", "parent", ",", "*", "args", ",", "**", "kwargs", ")", "color", "=", "\"rgb({0}, {1}, {2})\"", "self", ".", "__editor_node_format", "=", "\"<span>{0}</span>\"", "self", ".", "__file_node_format", "=", "\"<span style=\\\"color: {0};\\\">{{0}}</span>\"", ".", "format", "(", "color", ".", "format", "(", "160", ",", "160", ",", "160", ")", ")", "self", ".", "__directory_node_format", "=", "\"{0}\"", "self", ".", "__project_node_format", "=", "\"<b>{0}</b>\"", "self", ".", "__default_project_node_format", "=", "\"<b>Open Files</b>\"" ]
python
Initializes the class. :param parent: Object parent. :type parent: QObject :param \*args: Arguments. :type \*args: \* :param \*\*kwargs: Keywords arguments. :type \*\*kwargs: \*\*
false
1,667,708
def build_output_directory(self, path):
    '''
    Set the output directory for extracted files.

    @path - The path to the file that data will be extracted from.

    Returns the output directory path.
    '''
    # If we have not already created an output directory for this target
    # file, create one now
    if not has_key(self.extraction_directories, path):
        basedir = os.path.dirname(path)
        basename = os.path.basename(path)

        if basedir != self.directory:
            # During recursive extraction, extracted files will be in subdirectories
            # of the CWD. This allows us to figure out the subdirectory by simply
            # splitting the target file's base directory on our known CWD.
            #
            # However, the very *first* file being scanned is not necessarily in the
            # CWD, so this will raise an IndexError. This is easy to handle though,
            # since the very first file being scanned needs to have its contents
            # extracted to ${CWD}/_basename.extracted, so we just set the subdir
            # variable to a blank string when an IndexError is encountered.
            try:
                subdir = basedir.split(self.directory)[1][1:]
            except IndexError as e:
                subdir = ""
        else:
            subdir = ""

        if self.output_directory_override:
            output_directory = os.path.join(self.directory, subdir, self.output_directory_override)
        else:
            outdir = os.path.join(self.directory, subdir, '_' + basename)
            output_directory = unique_file_name(outdir, extension='extracted')

        if not os.path.exists(output_directory):
            os.mkdir(output_directory)

        self.extraction_directories[path] = output_directory
        self.output[path].directory = os.path.realpath(output_directory) + os.path.sep
    # Else, just use the already created directory
    else:
        output_directory = self.extraction_directories[path]

    return output_directory
[ "def", "build_output_directory", "(", "self", ",", "path", ")", ":", "if", "not", "has_key", "(", "self", ".", "extraction_directories", ",", "path", ")", ":", "basedir", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "basedir", "!=", "self", ".", "directory", ":", "try", ":", "subdir", "=", "basedir", ".", "split", "(", "self", ".", "directory", ")", "[", "1", "]", "[", "1", ":", "]", "except", "IndexError", "as", "e", ":", "subdir", "=", "\"\"", "else", ":", "subdir", "=", "\"\"", "if", "self", ".", "output_directory_override", ":", "output_directory", "=", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "subdir", ",", "self", ".", "output_directory_override", ")", "else", ":", "outdir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "directory", ",", "subdir", ",", "'_'", "+", "basename", ")", "output_directory", "=", "unique_file_name", "(", "outdir", ",", "extension", "=", "'extracted'", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_directory", ")", ":", "os", ".", "mkdir", "(", "output_directory", ")", "self", ".", "extraction_directories", "[", "path", "]", "=", "output_directory", "self", ".", "output", "[", "path", "]", ".", "directory", "=", "os", ".", "path", ".", "realpath", "(", "output_directory", ")", "+", "os", ".", "path", ".", "sep", "else", ":", "output_directory", "=", "self", ".", "extraction_directories", "[", "path", "]", "return", "output_directory" ]
python
Set the output directory for extracted files. @path - The path to the file that data will be extracted from. Returns the output directory path.
false
2,098,729
def load_data(self, data_np): """ Load raw numpy data into the viewer. """ image = AstroImage.AstroImage(logger=self.logger) image.set_data(data_np) self.set_image(image)
[ "def", "load_data", "(", "self", ",", "data_np", ")", ":", "image", "=", "AstroImage", ".", "AstroImage", "(", "logger", "=", "self", ".", "logger", ")", "image", ".", "set_data", "(", "data_np", ")", "self", ".", "set_image", "(", "image", ")" ]
python
Load raw numpy data into the viewer.
false
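For illustration, feeding the method a raw array would look like this (a minimal sketch; "viewer" is an assumed instance of the class defining load_data, not taken from the source):

import numpy as np

# Hypothetical usage: wrap a raw 2D array in an AstroImage and display it.
viewer.load_data(np.random.rand(512, 512))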
2,637,792
def run(self, handler): """Start bottle server.""" import eventlet.patcher if not eventlet.patcher.is_monkey_patched(os): msg = ("%s requires eventlet.monkey_patch() (before " "import)" % self.__class__.__name__) raise RuntimeError(msg) # Separate out wsgi.server arguments wsgi_args = {} for arg in ('log', 'environ', 'max_size', 'max_http_version', 'protocol', 'server_event', 'minimum_chunk_size', 'log_x_forwarded_for', 'custom_pool', 'keepalive', 'log_output', 'log_format', 'url_length_limit', 'debug', 'socket_timeout', 'capitalize_response_headers'): try: wsgi_args[arg] = self.options.pop(arg) except KeyError: pass if 'log_output' not in wsgi_args: wsgi_args['log_output'] = not self.quiet import eventlet.wsgi sock = self.options.pop('shared_socket', None) or self.get_socket() eventlet.wsgi.server(sock, handler, **wsgi_args)
[ "def", "run", "(", "self", ",", "handler", ")", ":", "import", "eventlet", ".", "patcher", "if", "not", "eventlet", ".", "patcher", ".", "is_monkey_patched", "(", "os", ")", ":", "msg", "=", "(", "\"%s requires eventlet.monkey_patch() (before \"", "\"import)\"", "%", "self", ".", "__class__", ".", "__name__", ")", "raise", "RuntimeError", "(", "msg", ")", "wsgi_args", "=", "{", "}", "for", "arg", "in", "(", "'log'", ",", "'environ'", ",", "'max_size'", ",", "'max_http_version'", ",", "'protocol'", ",", "'server_event'", ",", "'minimum_chunk_size'", ",", "'log_x_forwarded_for'", ",", "'custom_pool'", ",", "'keepalive'", ",", "'log_output'", ",", "'log_format'", ",", "'url_length_limit'", ",", "'debug'", ",", "'socket_timeout'", ",", "'capitalize_response_headers'", ")", ":", "try", ":", "wsgi_args", "[", "arg", "]", "=", "self", ".", "options", ".", "pop", "(", "arg", ")", "except", "KeyError", ":", "pass", "if", "'log_output'", "not", "in", "wsgi_args", ":", "wsgi_args", "[", "'log_output'", "]", "=", "not", "self", ".", "quiet", "import", "eventlet", ".", "wsgi", "sock", "=", "self", ".", "options", ".", "pop", "(", "'shared_socket'", ",", "None", ")", "or", "self", ".", "get_socket", "(", ")", "eventlet", ".", "wsgi", ".", "server", "(", "sock", ",", "handler", ",", "**", "wsgi_args", ")" ]
python
Start bottle server.
false
2,061,608
def get_all(self, start=0, count=-1, sort=''): """ Gets a list of logical interconnects based on optional sorting and filtering and is constrained by start and count parameters. Args: start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response might differ from the requested count if the sum of start and count exceeds the total number of items. sort: The sort order of the returned data set. By default, the sort order is based on create time with the oldest entry first. Returns: list: A list of logical interconnects. """ return self._helper.get_all(start, count, sort=sort)
[ "def", "get_all", "(", "self", ",", "start", "=", "0", ",", "count", "=", "-", "1", ",", "sort", "=", "''", ")", ":", "return", "self", ".", "_helper", ".", "get_all", "(", "start", ",", "count", ",", "sort", "=", "sort", ")" ]
python
Gets a list of logical interconnects based on optional sorting and filtering and is constrained by start and count parameters. Args: start: The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the first available item. count: The number of resources to return. A count of -1 requests all items. The actual number of items in the response might differ from the requested count if the sum of start and count exceeds the total number of items. sort: The sort order of the returned data set. By default, the sort order is based on create time with the oldest entry first. Returns: list: A list of logical interconnects.
false
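A hypothetical paging sketch, assuming "logical_interconnects" is a client object exposing the method above; the semantics follow directly from the documented start/count parameters:

# count=-1 (the default) requests everything; start/count page through results.
all_items = logical_interconnects.get_all()
first_page = logical_interconnects.get_all(0, 25)    # items 0..24
second_page = logical_interconnects.get_all(25, 25)  # items 25..49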
2,519,152
def run(self): """Run flux consistency check command""" # Load compound information def compound_name(id): if id not in self._model.compounds: return id return self._model.compounds[id].properties.get('name', id) epsilon = self._args.epsilon if self._args.unrestricted: # Allow all exchange reactions with no flux limits for reaction in self._mm.reactions: if self._mm.is_exchange(reaction): del self._mm.limits[reaction].bounds loop_removal = self._get_loop_removal_option() enable_tfba = loop_removal == 'tfba' enable_fastcore = self._args.fastcore if enable_tfba and enable_fastcore: self.argument_error( 'Using Fastcore with thermodynamic constraints' ' is not supported!') start_time = time.time() if enable_fastcore: solver = self._get_solver() try: inconsistent = set(fastcore.fastcc( self._mm, epsilon, solver=solver)) except fluxanalysis.FluxBalanceError as e: self.report_flux_balance_error(e) else: if enable_tfba: solver = self._get_solver(integer=True) else: solver = self._get_solver() if self._args.reduce_lp: logger.info('Running with reduced number of LP problems.') try: inconsistent = set( fluxanalysis.consistency_check( self._mm, self._mm.reactions, epsilon, tfba=enable_tfba, solver=solver)) except fluxanalysis.FluxBalanceError as e: self.report_flux_balance_error(e) else: logger.info('Using flux bounds to determine consistency.') try: inconsistent = set(self._run_fva_fluxcheck( self._mm, solver, enable_tfba, epsilon)) except FluxCheckFVATaskError: self.report_flux_balance_error() logger.info('Solving took {:.2f} seconds'.format( time.time() - start_time)) # Count the number of reactions that are fixed at zero. While these # reactions are still inconsistent, they are inconsistent because they # have been explicitly disabled. disabled_exchange = 0 disabled_internal = 0 count_exchange = 0 total_exchange = 0 count_internal = 0 total_internal = 0 # Print result for reaction in sorted(self._mm.reactions): disabled = self._mm.limits[reaction].bounds == (0, 0) if self._mm.is_exchange(reaction): total_exchange += 1 count_exchange += int(reaction in inconsistent) disabled_exchange += int(disabled) else: total_internal += 1 count_internal += int(reaction in inconsistent) disabled_internal += int(disabled) if reaction in inconsistent: rx = self._mm.get_reaction(reaction) rxt = rx.translated_compounds(compound_name) print('{}\t{}'.format(reaction, rxt)) logger.info('Model has {}/{} inconsistent internal reactions' ' ({} disabled by user)'.format( count_internal, total_internal, disabled_internal)) logger.info('Model has {}/{} inconsistent exchange reactions' ' ({} disabled by user)'.format( count_exchange, total_exchange, disabled_exchange))
[ "def", "run", "(", "self", ")", ":", "def", "compound_name", "(", "id", ")", ":", "if", "id", "not", "in", "self", ".", "_model", ".", "compounds", ":", "return", "id", "return", "self", ".", "_model", ".", "compounds", "[", "id", "]", ".", "properties", ".", "get", "(", "'name'", ",", "id", ")", "epsilon", "=", "self", ".", "_args", ".", "epsilon", "if", "self", ".", "_args", ".", "unrestricted", ":", "for", "reaction", "in", "self", ".", "_mm", ".", "reactions", ":", "if", "self", ".", "_mm", ".", "is_exchange", "(", "reaction", ")", ":", "del", "self", ".", "_mm", ".", "limits", "[", "reaction", "]", ".", "bounds", "loop_removal", "=", "self", ".", "_get_loop_removal_option", "(", ")", "enable_tfba", "=", "loop_removal", "==", "'tfba'", "enable_fastcore", "=", "self", ".", "_args", ".", "fastcore", "if", "enable_tfba", "and", "enable_fastcore", ":", "self", ".", "argument_error", "(", "'Using Fastcore with thermodynamic constraints'", "' is not supported!'", ")", "start_time", "=", "time", ".", "time", "(", ")", "if", "enable_fastcore", ":", "solver", "=", "self", ".", "_get_solver", "(", ")", "try", ":", "inconsistent", "=", "set", "(", "fastcore", ".", "fastcc", "(", "self", ".", "_mm", ",", "epsilon", ",", "solver", "=", "solver", ")", ")", "except", "fluxanalysis", ".", "FluxBalanceError", "as", "e", ":", "self", ".", "report_flux_balance_error", "(", "e", ")", "else", ":", "if", "enable_tfba", ":", "solver", "=", "self", ".", "_get_solver", "(", "integer", "=", "True", ")", "else", ":", "solver", "=", "self", ".", "_get_solver", "(", ")", "if", "self", ".", "_args", ".", "reduce_lp", ":", "logger", ".", "info", "(", "'Running with reduced number of LP problems.'", ")", "try", ":", "inconsistent", "=", "set", "(", "fluxanalysis", ".", "consistency_check", "(", "self", ".", "_mm", ",", "self", ".", "_mm", ".", "reactions", ",", "epsilon", ",", "tfba", "=", "enable_tfba", ",", "solver", "=", "solver", ")", ")", "except", "fluxanalysis", ".", "FluxBalanceError", "as", "e", ":", "self", ".", "report_flux_balance_error", "(", "e", ")", "else", ":", "logger", ".", "info", "(", "'Using flux bounds to determine consistency.'", ")", "try", ":", "inconsistent", "=", "set", "(", "self", ".", "_run_fva_fluxcheck", "(", "self", ".", "_mm", ",", "solver", ",", "enable_tfba", ",", "epsilon", ")", ")", "except", "FluxCheckFVATaskError", ":", "self", ".", "report_flux_balance_error", "(", ")", "logger", ".", "info", "(", "'Solving took {:.2f} seconds'", ".", "format", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "disabled_exchange", "=", "0", "disabled_internal", "=", "0", "count_exchange", "=", "0", "total_exchange", "=", "0", "count_internal", "=", "0", "total_internal", "=", "0", "for", "reaction", "in", "sorted", "(", "self", ".", "_mm", ".", "reactions", ")", ":", "disabled", "=", "self", ".", "_mm", ".", "limits", "[", "reaction", "]", ".", "bounds", "==", "(", "0", ",", "0", ")", "if", "self", ".", "_mm", ".", "is_exchange", "(", "reaction", ")", ":", "total_exchange", "+=", "1", "count_exchange", "+=", "int", "(", "reaction", "in", "inconsistent", ")", "disabled_exchange", "+=", "int", "(", "disabled", ")", "else", ":", "total_internal", "+=", "1", "count_internal", "+=", "int", "(", "reaction", "in", "inconsistent", ")", "disabled_internal", "+=", "int", "(", "disabled", ")", "if", "reaction", "in", "inconsistent", ":", "rx", "=", "self", ".", "_mm", ".", "get_reaction", "(", "reaction", ")", "rxt", "=", "rx", ".", "translated_compounds", "(", "compound_name", ")", 
"print", "(", "'{}\\t{}'", ".", "format", "(", "reaction", ",", "rxt", ")", ")", "logger", ".", "info", "(", "'Model has {}/{} inconsistent internal reactions'", "' ({} disabled by user)'", ".", "format", "(", "count_internal", ",", "total_internal", ",", "disabled_internal", ")", ")", "logger", ".", "info", "(", "'Model has {}/{} inconsistent exchange reactions'", "' ({} disabled by user)'", ".", "format", "(", "count_exchange", ",", "total_exchange", ",", "disabled_exchange", ")", ")" ]
python
Run flux consistency check command
false
1,962,809
def plot_op(fn, inputs=[], outputs=[]):
    """
    User-exposed api method for constructing a python_node

    Args:
      fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it can have arbitrary side effects.
      inputs: array of tf.Tensors (optional). These are where fn derives its values from
      outputs: tf.Placeholder nodes (optional). These are constructed by the user (which allows the user to plug them into other ht.Ops or tf.Ops). The outputs of fn are mapped to each of the output placeholders.

    raises an Error if fn cannot map
    """
    global COUNT, ht

    # check outputs
    if not isinstance(outputs, list):
        outputs = [outputs]

    for tensor in outputs:
        if tensor.op.type != 'Placeholder':
            raise Error('Output nodes must be Placeholders')

    op = PlotOp(fn, COUNT, inputs, outputs)
    op_store.add_op(op)
    COUNT += 1

    # if node has output, return value for python_op is the first output (placeholder) tensor
    # otherwise, return the op
    if outputs:
        return outputs[0]
    else:
        return op
[ "def", "plot_op", "(", "fn", ",", "inputs", "=", "[", "]", ",", "outputs", "=", "[", "]", ")", ":", "global", "COUNT", ",", "ht", "if", "not", "isinstance", "(", "outputs", ",", "list", ")", ":", "outputs", "=", "[", "outputs", "]", "for", "tensor", "in", "outputs", ":", "if", "tensor", ".", "op", ".", "type", "is", "not", "'Placeholder'", ":", "raise", "Error", "(", "'Output nodes must be Placeholders'", ")", "op", "=", "PlotOp", "(", "fn", ",", "COUNT", ",", "inputs", ",", "outputs", ")", "op_store", ".", "add_op", "(", "op", ")", "COUNT", "+=", "1", "if", "outputs", ":", "return", "outputs", "[", "0", "]", "else", ":", "return", "op" ]
python
User-exposed api method for constructing a python_node Args: fn: python function that computes some np.ndarrays given np.ndarrays as inputs. it can have arbitrary side effects. inputs: array of tf.Tensors (optional). These are where fn derives its values from outputs: tf.Placeholder nodes (optional). These are constructed by the user (which allows the user to plug them into other ht.Ops or tf.Ops). The outputs of fn are mapped to each of the output placeholders. raises an Error if fn cannot map
false
1,631,948
def _get_dir_list(load): ''' Get a list of all directories on the master ''' if 'env' in load: # "env" is not supported; Use "saltenv". load.pop('env') if 'saltenv' not in load or load['saltenv'] not in envs(): return [] ret = set() for repo in init(): repo['repo'].open() ref = _get_ref(repo, load['saltenv']) if ref: manifest = repo['repo'].manifest(rev=ref[1]) for tup in manifest: filepath = tup[4] split = filepath.rsplit('/', 1) while len(split) > 1: relpath = os.path.relpath(split[0], repo['root']) # Don't add '.' if relpath != '.': # Don't add files outside the hgfs_root if not relpath.startswith('../'): ret.add(os.path.join(repo['mountpoint'], relpath)) split = split[0].rsplit('/', 1) repo['repo'].close() if repo['mountpoint']: ret.add(repo['mountpoint']) return sorted(ret)
[ "def", "_get_dir_list", "(", "load", ")", ":", "if", "'env'", "in", "load", ":", "load", ".", "pop", "(", "'env'", ")", "if", "'saltenv'", "not", "in", "load", "or", "load", "[", "'saltenv'", "]", "not", "in", "envs", "(", ")", ":", "return", "[", "]", "ret", "=", "set", "(", ")", "for", "repo", "in", "init", "(", ")", ":", "repo", "[", "'repo'", "]", ".", "open", "(", ")", "ref", "=", "_get_ref", "(", "repo", ",", "load", "[", "'saltenv'", "]", ")", "if", "ref", ":", "manifest", "=", "repo", "[", "'repo'", "]", ".", "manifest", "(", "rev", "=", "ref", "[", "1", "]", ")", "for", "tup", "in", "manifest", ":", "filepath", "=", "tup", "[", "4", "]", "split", "=", "filepath", ".", "rsplit", "(", "'/'", ",", "1", ")", "while", "len", "(", "split", ")", ">", "1", ":", "relpath", "=", "os", ".", "path", ".", "relpath", "(", "split", "[", "0", "]", ",", "repo", "[", "'root'", "]", ")", "if", "relpath", "!=", "'.'", ":", "if", "not", "relpath", ".", "startswith", "(", "'../'", ")", ":", "ret", ".", "add", "(", "os", ".", "path", ".", "join", "(", "repo", "[", "'mountpoint'", "]", ",", "relpath", ")", ")", "split", "=", "split", "[", "0", "]", ".", "rsplit", "(", "'/'", ",", "1", ")", "repo", "[", "'repo'", "]", ".", "close", "(", ")", "if", "repo", "[", "'mountpoint'", "]", ":", "ret", ".", "add", "(", "repo", "[", "'mountpoint'", "]", ")", "return", "sorted", "(", "ret", ")" ]
python
Get a list of all directories on the master
false
2,345,272
def pack(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None, sep: str = ',') -> str:
    """ Util for joining multiple fields with commas """
    if default is not None:
        value = kwargs.get(field, default)
    else:
        value = kwargs[field]

    if isinstance(value, str):
        return value
    elif isinstance(value, collections.abc.Iterable):
        return sep.join(str(f) for f in value)
    else:
        return str(value)
[ "def", "pack", "(", "field", ":", "str", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", ",", "default", ":", "Optional", "[", "Any", "]", "=", "None", ",", "sep", ":", "str", "=", "','", ")", "->", "str", ":", "if", "default", "is", "not", "None", ":", "value", "=", "kwargs", ".", "get", "(", "field", ",", "default", ")", "else", ":", "value", "=", "kwargs", "[", "field", "]", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "collections", ".", "abc", ".", "Iterable", ")", ":", "return", "sep", ".", "join", "(", "str", "(", "f", ")", "for", "f", "in", "value", ")", "else", ":", "return", "str", "(", "value", ")" ]
python
Util for joining multiple fields with commas
false
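A behaviour sketch, assuming the pack helper above is importable; the expected outputs follow directly from its branches:

print(pack('ids', {'ids': [1, 2, 3]}))        # "1,2,3"  (iterable joined)
print(pack('name', {'name': 'solo'}))         # "solo"   (strings pass through)
print(pack('tags', {}, default=('a', 'b')))   # "a,b"    (default used when key is absent)
print(pack('ids', {'ids': [1, 2]}, sep='|'))  # "1|2"    (custom separator)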
1,705,035
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'type') and self.type is not None: _dict['type'] = self.type if hasattr(self, 'codec') and self.codec is not None: _dict['codec'] = self.codec if hasattr(self, 'frequency') and self.frequency is not None: _dict['frequency'] = self.frequency if hasattr(self, 'compression') and self.compression is not None: _dict['compression'] = self.compression return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'type'", ")", "and", "self", ".", "type", "is", "not", "None", ":", "_dict", "[", "'type'", "]", "=", "self", ".", "type", "if", "hasattr", "(", "self", ",", "'codec'", ")", "and", "self", ".", "codec", "is", "not", "None", ":", "_dict", "[", "'codec'", "]", "=", "self", ".", "codec", "if", "hasattr", "(", "self", ",", "'frequency'", ")", "and", "self", ".", "frequency", "is", "not", "None", ":", "_dict", "[", "'frequency'", "]", "=", "self", ".", "frequency", "if", "hasattr", "(", "self", ",", "'compression'", ")", "and", "self", ".", "compression", "is", "not", "None", ":", "_dict", "[", "'compression'", "]", "=", "self", ".", "compression", "return", "_dict" ]
python
Return a json dictionary representing this model.
false
2,106,442
def multi_buffer_help(): """Help message for multi buffer dialog. .. versionadded:: 4.0.0 :returns: A message object containing helpful information. :rtype: messaging.message.Message """ message = m.Message() message.add(m.Brand()) message.add(heading()) message.add(content()) return message
[ "def", "multi_buffer_help", "(", ")", ":", "message", "=", "m", ".", "Message", "(", ")", "message", ".", "add", "(", "m", ".", "Brand", "(", ")", ")", "message", ".", "add", "(", "heading", "(", ")", ")", "message", ".", "add", "(", "content", "(", ")", ")", "return", "message" ]
python
Help message for multi buffer dialog. .. versionadded:: 4.0.0 :returns: A message object containing helpful information. :rtype: messaging.message.Message
false
2,534,732
def refresh_gui(delay=0.0001, wait=0.0001): """Use up all the events waiting to be run :param delay: Time to wait before using events :param wait: Time to wait between iterations of events This function will block until all pending events are emitted. This is useful in testing to ensure signals and other asynchronous functionality is required to take place. """ time.sleep(delay) while gtk.events_pending(): gtk.main_iteration_do(block=False) time.sleep(wait)
[ "def", "refresh_gui", "(", "delay", "=", "0.0001", ",", "wait", "=", "0.0001", ")", ":", "time", ".", "sleep", "(", "delay", ")", "while", "gtk", ".", "events_pending", "(", ")", ":", "gtk", ".", "main_iteration_do", "(", "block", "=", "False", ")", "time", ".", "sleep", "(", "wait", ")" ]
python
Use up all the events waiting to be run :param delay: Time to wait before using events :param wait: Time to wait between iterations of events This function will block until all pending events are emitted. This is useful in testing to ensure signals and other asynchronous functionality is required to take place.
false
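In a PyGTK test this is typically called between stimulus and assertion so queued handlers actually run; a minimal sketch with assumed widgets "button" and "window" (hypothetical names, not from the source):

# Hypothetical test flow: emit a signal, drain the event queue, then assert.
button.clicked()
refresh_gui()
assert window.get_title() == 'Clicked'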
2,613,534
def __init__(self, gp): """ __init__(limix::CGPCholCache self, CGPbase gp) -> CGPCholCache Parameters ---------- gp: limix::CGPbase * """ this = _core.new_CGPCholCache(gp) try: self.this.append(this) except Exception: self.this = this
[ "def", "__init__", "(", "self", ",", "gp", ")", ":", "this", "=", "_core", ".", "new_CGPCholCache", "(", "gp", ")", "try", ":", "self", ".", "this", ".", "append", "(", "this", ")", "except", "Exception", ":", "self", ".", "this", "=", "this" ]
python
__init__(limix::CGPCholCache self, CGPbase gp) -> CGPCholCache Parameters ---------- gp: limix::CGPbase *
false
2,571,232
def __init__(self, progress_url=None, user_id=None, workflow_state=None, created_at=None, id=None, attachment=None, export_type=None): """Init method for Contentexport class.""" self._progress_url = progress_url self._user_id = user_id self._workflow_state = workflow_state self._created_at = created_at self._id = id self._attachment = attachment self._export_type = export_type self.logger = logging.getLogger('pycanvas.Contentexport')
[ "def", "__init__", "(", "self", ",", "progress_url", "=", "None", ",", "user_id", "=", "None", ",", "workflow_state", "=", "None", ",", "created_at", "=", "None", ",", "id", "=", "None", ",", "attachment", "=", "None", ",", "export_type", "=", "None", ")", ":", "self", ".", "_progress_url", "=", "progress_url", "self", ".", "_user_id", "=", "user_id", "self", ".", "_workflow_state", "=", "workflow_state", "self", ".", "_created_at", "=", "created_at", "self", ".", "_id", "=", "id", "self", ".", "_attachment", "=", "attachment", "self", ".", "_export_type", "=", "export_type", "self", ".", "logger", "=", "logging", ".", "getLogger", "(", "'pycanvas.Contentexport'", ")" ]
python
Init method for Contentexport class.
false
1,610,243
def TimeFromTicks(ticks, tz=None): """Construct a DB-API time value from the given ticks value. :type ticks: float :param ticks: a number of seconds since the epoch; see the documentation of the standard Python time module for details. :type tz: :class:`datetime.tzinfo` :param tz: (Optional) time zone to use for conversion :rtype: :class:`datetime.time` :returns: time represented by ticks. """ dt = datetime.datetime.fromtimestamp(ticks, tz=tz) return dt.timetz()
[ "def", "TimeFromTicks", "(", "ticks", ",", "tz", "=", "None", ")", ":", "dt", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "ticks", ",", "tz", "=", "tz", ")", "return", "dt", ".", "timetz", "(", ")" ]
python
Construct a DB-API time value from the given ticks value. :type ticks: float :param ticks: a number of seconds since the epoch; see the documentation of the standard Python time module for details. :type tz: :class:`datetime.tzinfo` :param tz: (Optional) time zone to use for conversion :rtype: :class:`datetime.time` :returns: time represented by ticks.
false
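A quick check of the semantics, using only the standard library:

import datetime
import time

# Tick 0 in UTC is the epoch, so the time-of-day is midnight with tzinfo kept.
print(TimeFromTicks(0, tz=datetime.timezone.utc))  # 00:00:00+00:00
now_local = TimeFromTicks(time.time())             # naive local time-of-day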
1,602,856
def kill_process_children(pid): """Find and kill child processes of a process. :param pid: PID of parent process (process ID) :return: Nothing """ if sys.platform == "darwin": kill_process_children_osx(pid) elif sys.platform == "linux": kill_process_children_unix(pid) else: pass
[ "def", "kill_process_children", "(", "pid", ")", ":", "if", "sys", ".", "platform", "==", "\"darwin\"", ":", "kill_process_children_osx", "(", "pid", ")", "elif", "sys", ".", "platform", "==", "\"linux\"", ":", "kill_process_children_unix", "(", "pid", ")", "else", ":", "pass" ]
python
Find and kill child processes of a process. :param pid: PID of parent process (process ID) :return: Nothing
false
2,305,597
def _process_args(self, analysis, args): """ When Naarad is run in CLI mode, get the CL arguments and update the analysis :param: analysis: The analysis being processed :param: args: Command Line Arguments received by naarad """ if args.exit_code: self.return_exit_code = args.exit_code if args.no_plots: self.skip_plots = args.no_plots if args.start: analysis.ts_start = naarad.utils.get_standardized_timestamp(args.start, None) if args.end: analysis.ts_end = naarad.utils.get_standardized_timestamp(args.end, None) if args.variables: analysis.variables = naarad.utils.get_variables(args) return CONSTANTS.OK
[ "def", "_process_args", "(", "self", ",", "analysis", ",", "args", ")", ":", "if", "args", ".", "exit_code", ":", "self", ".", "return_exit_code", "=", "args", ".", "exit_code", "if", "args", ".", "no_plots", ":", "self", ".", "skip_plots", "=", "args", ".", "no_plots", "if", "args", ".", "start", ":", "analysis", ".", "ts_start", "=", "naarad", ".", "utils", ".", "get_standardized_timestamp", "(", "args", ".", "start", ",", "None", ")", "if", "args", ".", "end", ":", "analysis", ".", "ts_end", "=", "naarad", ".", "utils", ".", "get_standardized_timestamp", "(", "args", ".", "end", ",", "None", ")", "if", "args", ".", "variables", ":", "analysis", ".", "variables", "=", "naarad", ".", "utils", ".", "get_variables", "(", "args", ")", "return", "CONSTANTS", ".", "OK" ]
python
When Naarad is run in CLI mode, get the CL arguments and update the analysis :param: analysis: The analysis being processed :param: args: Command Line Arguments received by naarad
false
2,285,554
def print_tree(self) -> str: """Convert AST object to tree view of BEL AST Returns: printed tree of BEL AST """ if self.ast: return self.ast.print_tree(ast_obj=self.ast) else: return ""
[ "def", "print_tree", "(", "self", ")", "->", "str", ":", "if", "self", ".", "ast", ":", "return", "self", ".", "ast", ".", "print_tree", "(", "ast_obj", "=", "self", ".", "ast", ")", "else", ":", "return", "\"\"" ]
python
Convert AST object to tree view of BEL AST Returns: printed tree of BEL AST
false
1,707,192
def __init__( self, size, weights=None, bias=True, l2_regularization=0.0, l1_regularization=0.0, trainable=True, named_tensors=None, scope='linear', summary_labels=() ): """ Linear layer. Args: size: Layer size. weights: Weight initialization, random if None. bias: Bias initialization, random if True, no bias added if False. l2_regularization: L2 regularization weight. l1_regularization: L1 regularization weight. """ self.size = size self.weights_init = weights self.bias_init = bias self.l2_regularization = l2_regularization self.l1_regularization = l1_regularization self.trainable = trainable super(Linear, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
[ "def", "__init__", "(", "self", ",", "size", ",", "weights", "=", "None", ",", "bias", "=", "True", ",", "l2_regularization", "=", "0.0", ",", "l1_regularization", "=", "0.0", ",", "trainable", "=", "True", ",", "named_tensors", "=", "None", ",", "scope", "=", "'linear'", ",", "summary_labels", "=", "(", ")", ")", ":", "self", ".", "size", "=", "size", "self", ".", "weights_init", "=", "weights", "self", ".", "bias_init", "=", "bias", "self", ".", "l2_regularization", "=", "l2_regularization", "self", ".", "l1_regularization", "=", "l1_regularization", "self", ".", "trainable", "=", "trainable", "super", "(", "Linear", ",", "self", ")", ".", "__init__", "(", "named_tensors", "=", "named_tensors", ",", "scope", "=", "scope", ",", "summary_labels", "=", "summary_labels", ")" ]
python
Linear layer. Args: size: Layer size. weights: Weight initialization, random if None. bias: Bias initialization, random if True, no bias added if False. l2_regularization: L2 regularization weight. l1_regularization: L1 regularization weight.
false
2,703,718
def parse_option(self, option, block_name, *values): """ Parse duration option for timer. """ try: if len(values) != 1: raise TypeError self.total_duration = int(values[0]) if self.total_duration <= 0: raise ValueError except ValueError: pattern = u'"{0}" must be an integer > 0' raise ValueError(pattern.format(option))
[ "def", "parse_option", "(", "self", ",", "option", ",", "block_name", ",", "*", "values", ")", ":", "try", ":", "if", "len", "(", "values", ")", "!=", "1", ":", "raise", "TypeError", "self", ".", "total_duration", "=", "int", "(", "values", "[", "0", "]", ")", "if", "self", ".", "total_duration", "<=", "0", ":", "raise", "ValueError", "except", "ValueError", ":", "pattern", "=", "u'\"{0}\" must be an integer > 0'", "raise", "ValueError", "(", "pattern", ".", "format", "(", "option", ")", ")" ]
python
Parse duration option for timer.
false
2,396,531
def _set_trunk_private_vlan_classification(self, v, load=False): """ Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/port_channel/switchport/trunk_private_vlan_classification (container) If this variable is read-only (config: false) in the source YANG file, then _set_trunk_private_vlan_classification is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_trunk_private_vlan_classification() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """trunk_private_vlan_classification must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name="trunk-private-vlan-classification", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""", }) self.__trunk_private_vlan_classification = t if hasattr(self, '_set'): self._set()
[ "def", "_set_trunk_private_vlan_classification", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "trunk_private_vlan_classification", ".", "trunk_private_vlan_classification", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"trunk-private-vlan-classification\"", ",", "rest_name", "=", "\"\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-drop-node-name'", ":", "None", ",", "u'callpoint'", ":", "u'ctag-pvlan-classification-phy-config'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-interface'", ",", "defining_module", "=", "'brocade-interface'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"trunk_private_vlan_classification must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=trunk_private_vlan_classification.trunk_private_vlan_classification, is_container='container', presence=False, yang_name=\"trunk-private-vlan-classification\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'ctag-pvlan-classification-phy-config'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__trunk_private_vlan_classification", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
python
Setter method for trunk_private_vlan_classification, mapped from YANG variable /interface/port_channel/switchport/trunk_private_vlan_classification (container) If this variable is read-only (config: false) in the source YANG file, then _set_trunk_private_vlan_classification is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_trunk_private_vlan_classification() directly.
false
2,155,265
def censor(self, input_text): """Returns input_text with any profane words censored.""" bad_words = self.get_profane_words() res = input_text for word in bad_words: # Apply word boundaries to the bad word regex_string = r'{0}' if self._no_word_boundaries else r'\b{0}\b' regex_string = regex_string.format(word) regex = re.compile(regex_string, re.IGNORECASE) res = regex.sub(self._censor_char * len(word), res) return res
[ "def", "censor", "(", "self", ",", "input_text", ")", ":", "bad_words", "=", "self", ".", "get_profane_words", "(", ")", "res", "=", "input_text", "for", "word", "in", "bad_words", ":", "regex_string", "=", "r'{0}'", "if", "self", ".", "_no_word_boundaries", "else", "r'\\b{0}\\b'", "regex_string", "=", "regex_string", ".", "format", "(", "word", ")", "regex", "=", "re", ".", "compile", "(", "regex_string", ",", "re", ".", "IGNORECASE", ")", "res", "=", "regex", ".", "sub", "(", "self", ".", "_censor_char", "*", "len", "(", "word", ")", ",", "res", ")", "return", "res" ]
python
Returns input_text with any profane words censored.
false
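The word-boundary substitution generalises beyond this class; a standalone sketch of the same technique (not this library's API) looks like:

import re

def censor_words(text, bad_words, censor_char='*'):
    # Match each word case-insensitively at word boundaries and replace it
    # with censor characters of equal length, mirroring the method above.
    for word in bad_words:
        regex = re.compile(r'\b{0}\b'.format(word), re.IGNORECASE)
        text = regex.sub(censor_char * len(word), text)
    return text

print(censor_words('Darn it, DARN!', ['darn']))  # '**** it, ****!'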
2,310,823
def set_value(self, value): """Set the value associated with the keyword""" if not isinstance(value, str): raise TypeError("A value must be a string, got %s." % value) self.__value = value
[ "def", "set_value", "(", "self", ",", "value", ")", ":", "if", "not", "isinstance", "(", "value", ",", "str", ")", ":", "raise", "TypeError", "(", "\"A value must be a string, got %s.\"", "%", "value", ")", "self", ".", "__value", "=", "value" ]
python
Set the value associated with the keyword
false
1,945,046
def start(self): """ Start the node on the cloud using the given instance properties. This method is non-blocking: as soon as the node id is returned from the cloud provider, it will return. The `is_alive`:meth: and `update_ips`:meth: methods should be used to further gather details about the state of the node. """ log.info("Starting node `%s` from image `%s` with flavor %s ...", self.name, self.image_id, self.flavor) self.instance_id = self._cloud_provider.start_instance( self.user_key_name, self.user_key_public, self.user_key_private, self.security_group, self.flavor, self.image_id, self.image_userdata, username=self.image_user, node_name=("%s-%s" % (self.cluster_name, self.name)), **self.extra) log.debug("Node `%s` has instance ID `%s`", self.name, self.instance_id)
[ "def", "start", "(", "self", ")", ":", "log", ".", "info", "(", "\"Starting node `%s` from image `%s` with flavor %s ...\"", ",", "self", ".", "name", ",", "self", ".", "image_id", ",", "self", ".", "flavor", ")", "self", ".", "instance_id", "=", "self", ".", "_cloud_provider", ".", "start_instance", "(", "self", ".", "user_key_name", ",", "self", ".", "user_key_public", ",", "self", ".", "user_key_private", ",", "self", ".", "security_group", ",", "self", ".", "flavor", ",", "self", ".", "image_id", ",", "self", ".", "image_userdata", ",", "username", "=", "self", ".", "image_user", ",", "node_name", "=", "(", "\"%s-%s\"", "%", "(", "self", ".", "cluster_name", ",", "self", ".", "name", ")", ")", ",", "**", "self", ".", "extra", ")", "log", ".", "debug", "(", "\"Node `%s` has instance ID `%s`\"", ",", "self", ".", "name", ",", "self", ".", "instance_id", ")" ]
python
Start the node on the cloud using the given instance properties. This method is non-blocking: as soon as the node id is returned from the cloud provider, it will return. The `is_alive`:meth: and `update_ips`:meth: methods should be used to further gather details about the state of the node.
false
1,695,976
def simulate(sim_type: str,
             num_qubits: int,
             num_gates: int,
             num_prefix_qubits: int = 0,
             use_processes: bool = False) -> None:
    """Runs the simulator."""
    circuit = cirq.Circuit(device=test_device)

    for _ in range(num_gates):
        which = np.random.choice(['expz', 'expw', 'exp11'])
        if which == 'expw':
            q1 = cirq.GridQubit(0, np.random.randint(num_qubits))
            circuit.append(
                cirq.PhasedXPowGate(
                    phase_exponent=np.random.random(),
                    exponent=np.random.random()
                ).on(q1))
        elif which == 'expz':
            q1 = cirq.GridQubit(0, np.random.randint(num_qubits))
            circuit.append(
                cirq.Z(q1)**np.random.random())
        elif which == 'exp11':
            q1 = cirq.GridQubit(0, np.random.randint(num_qubits - 1))
            q2 = cirq.GridQubit(0, q1.col + 1)
            circuit.append(cirq.CZ(q1, q2)**np.random.random())

    if sim_type == _XMON:
        options = cg.XmonOptions(num_shards=2 ** num_prefix_qubits,
                                 use_processes=use_processes)
        cg.XmonSimulator(options).run(circuit)
    elif sim_type == _UNITARY:
        circuit.apply_unitary_effect_to_state(initial_state=0)
    elif sim_type == _DENSITY:
        cirq.DensityMatrixSimulator().run(circuit)
[ "def", "simulate", "(", "sim_type", ":", "str", ",", "num_qubits", ":", "int", ",", "num_gates", ":", "int", ",", "num_prefix_qubits", ":", "int", "=", "0", ",", "use_processes", ":", "bool", "=", "False", ")", "->", "None", ":", "circuit", "=", "cirq", ".", "Circuit", "(", "device", "=", "test_device", ")", "for", "_", "in", "range", "(", "num_gates", ")", ":", "which", "=", "np", ".", "random", ".", "choice", "(", "[", "'expz'", ",", "'expw'", ",", "'exp11'", "]", ")", "if", "which", "==", "'expw'", ":", "q1", "=", "cirq", ".", "GridQubit", "(", "0", ",", "np", ".", "random", ".", "randint", "(", "num_qubits", ")", ")", "circuit", ".", "append", "(", "cirq", ".", "PhasedXPowGate", "(", "phase_exponent", "=", "np", ".", "random", ".", "random", "(", ")", ",", "exponent", "=", "np", ".", "random", ".", "random", "(", ")", ")", ".", "on", "(", "q1", ")", ")", "elif", "which", "==", "'expz'", ":", "q1", "=", "cirq", ".", "GridQubit", "(", "0", ",", "np", ".", "random", ".", "randint", "(", "num_qubits", ")", ")", "circuit", ".", "append", "(", "cirq", ".", "Z", "(", "q1", ")", "**", "np", ".", "random", ".", "random", "(", ")", ")", "elif", "which", "==", "'exp11'", ":", "q1", "=", "cirq", ".", "GridQubit", "(", "0", ",", "np", ".", "random", ".", "randint", "(", "num_qubits", "-", "1", ")", ")", "q2", "=", "cirq", ".", "GridQubit", "(", "0", ",", "q1", ".", "col", "+", "1", ")", "circuit", ".", "append", "(", "cirq", ".", "CZ", "(", "q1", ",", "q2", ")", "**", "np", ".", "random", ".", "random", "(", ")", ")", "if", "sim_type", "==", "_XMON", ":", "options", "=", "cg", ".", "XmonOptions", "(", "num_shards", "=", "2", "**", "num_prefix_qubits", ",", "use_processes", "=", "use_processes", ")", "cg", ".", "XmonSimulator", "(", "options", ")", ".", "run", "(", "circuit", ")", "elif", "sim_type", "==", "_UNITARY", ":", "circuit", ".", "apply_unitary_effect_to_state", "(", "initial_state", "=", "0", ")", "elif", "sim_type", "==", "_DENSITY", ":", "cirq", ".", "DensityMatrixSimulator", "(", ")", ".", "run", "(", "circuit", ")" ]
python
Runs the simulator.
false
1,819,706
def n_executions(self): """ Queries and returns the number of past task executions. """ pipeline = self.tiger.connection.pipeline() pipeline.exists(self.tiger._key('task', self.id)) pipeline.llen(self.tiger._key('task', self.id, 'executions')) exists, n_executions = pipeline.execute() if not exists: raise TaskNotFound('Task {} not found.'.format( self.id )) return n_executions
[ "def", "n_executions", "(", "self", ")", ":", "pipeline", "=", "self", ".", "tiger", ".", "connection", ".", "pipeline", "(", ")", "pipeline", ".", "exists", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ")", ")", "pipeline", ".", "llen", "(", "self", ".", "tiger", ".", "_key", "(", "'task'", ",", "self", ".", "id", ",", "'executions'", ")", ")", "exists", ",", "n_executions", "=", "pipeline", ".", "execute", "(", ")", "if", "not", "exists", ":", "raise", "TaskNotFound", "(", "'Task {} not found.'", ".", "format", "(", "self", ".", "id", ")", ")", "return", "n_executions" ]
python
Queries and returns the number of past task executions.
false
1,970,407
def get_field_info(wrapper, entity_type):
    'type: wrapper :atws.Wrapper'
    fields = wrapper.new('GetFieldInfo')
    fields.psObjectType = entity_type
    return wrapper.GetFieldInfo(fields)
[ "def", "get_field_info", "(", "wrapper", ",", "entity_type", ")", ":", "fields", "=", "wrapper", ".", "new", "(", "'GetFieldInfo'", ")", "fields", ".", "psObjectType", "=", "entity_type", "return", "wrapper", ".", "GetFieldInfo", "(", "fields", ")" ]
python
type: wrapper :atws.Wrapper
false
1,869,055
def __init__(self, version, service_sid, sid): """ Initialize the FunctionContext :param Version version: Version that contains the resource :param service_sid: The service_sid :param sid: The sid :returns: twilio.rest.serverless.v1.service.function.FunctionContext :rtype: twilio.rest.serverless.v1.service.function.FunctionContext """ super(FunctionContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Functions/{sid}'.format(**self._solution) # Dependents self._function_versions = None
[ "def", "__init__", "(", "self", ",", "version", ",", "service_sid", ",", "sid", ")", ":", "super", "(", "FunctionContext", ",", "self", ")", ".", "__init__", "(", "version", ")", "self", ".", "_solution", "=", "{", "'service_sid'", ":", "service_sid", ",", "'sid'", ":", "sid", ",", "}", "self", ".", "_uri", "=", "'/Services/{service_sid}/Functions/{sid}'", ".", "format", "(", "**", "self", ".", "_solution", ")", "self", ".", "_function_versions", "=", "None" ]
python
Initialize the FunctionContext :param Version version: Version that contains the resource :param service_sid: The service_sid :param sid: The sid :returns: twilio.rest.serverless.v1.service.function.FunctionContext :rtype: twilio.rest.serverless.v1.service.function.FunctionContext
false
1,677,350
def _tf_batch_map_coordinates(self, inputs, coords): """Batch version of tf_map_coordinates Only supports 2D feature maps Parameters ---------- inputs : ``tf.Tensor`` shape = (b*c, h, w) coords : ``tf.Tensor`` shape = (b*c, h, w, n, 2) Returns ------- ``tf.Tensor`` A Tensor with the shape as (b*c, h, w, n) """ input_shape = inputs.get_shape() coords_shape = coords.get_shape() batch_channel = tf.shape(inputs)[0] input_h = int(input_shape[1]) input_w = int(input_shape[2]) kernel_n = int(coords_shape[3]) n_coords = input_h * input_w * kernel_n coords_lt = tf.cast(tf.floor(coords), 'int32') coords_rb = tf.cast(tf.ceil(coords), 'int32') coords_lb = tf.stack([coords_lt[:, :, :, :, 0], coords_rb[:, :, :, :, 1]], axis=-1) coords_rt = tf.stack([coords_rb[:, :, :, :, 0], coords_lt[:, :, :, :, 1]], axis=-1) idx = self._tf_repeat(tf.range(batch_channel), n_coords) vals_lt = self._get_vals_by_coords(inputs, coords_lt, idx, (batch_channel, input_h, input_w, kernel_n)) vals_rb = self._get_vals_by_coords(inputs, coords_rb, idx, (batch_channel, input_h, input_w, kernel_n)) vals_lb = self._get_vals_by_coords(inputs, coords_lb, idx, (batch_channel, input_h, input_w, kernel_n)) vals_rt = self._get_vals_by_coords(inputs, coords_rt, idx, (batch_channel, input_h, input_w, kernel_n)) coords_offset_lt = coords - tf.cast(coords_lt, 'float32') vals_t = vals_lt + (vals_rt - vals_lt) * coords_offset_lt[:, :, :, :, 0] vals_b = vals_lb + (vals_rb - vals_lb) * coords_offset_lt[:, :, :, :, 0] mapped_vals = vals_t + (vals_b - vals_t) * coords_offset_lt[:, :, :, :, 1] return mapped_vals
[ "def", "_tf_batch_map_coordinates", "(", "self", ",", "inputs", ",", "coords", ")", ":", "input_shape", "=", "inputs", ".", "get_shape", "(", ")", "coords_shape", "=", "coords", ".", "get_shape", "(", ")", "batch_channel", "=", "tf", ".", "shape", "(", "inputs", ")", "[", "0", "]", "input_h", "=", "int", "(", "input_shape", "[", "1", "]", ")", "input_w", "=", "int", "(", "input_shape", "[", "2", "]", ")", "kernel_n", "=", "int", "(", "coords_shape", "[", "3", "]", ")", "n_coords", "=", "input_h", "*", "input_w", "*", "kernel_n", "coords_lt", "=", "tf", ".", "cast", "(", "tf", ".", "floor", "(", "coords", ")", ",", "'int32'", ")", "coords_rb", "=", "tf", ".", "cast", "(", "tf", ".", "ceil", "(", "coords", ")", ",", "'int32'", ")", "coords_lb", "=", "tf", ".", "stack", "(", "[", "coords_lt", "[", ":", ",", ":", ",", ":", ",", ":", ",", "0", "]", ",", "coords_rb", "[", ":", ",", ":", ",", ":", ",", ":", ",", "1", "]", "]", ",", "axis", "=", "-", "1", ")", "coords_rt", "=", "tf", ".", "stack", "(", "[", "coords_rb", "[", ":", ",", ":", ",", ":", ",", ":", ",", "0", "]", ",", "coords_lt", "[", ":", ",", ":", ",", ":", ",", ":", ",", "1", "]", "]", ",", "axis", "=", "-", "1", ")", "idx", "=", "self", ".", "_tf_repeat", "(", "tf", ".", "range", "(", "batch_channel", ")", ",", "n_coords", ")", "vals_lt", "=", "self", ".", "_get_vals_by_coords", "(", "inputs", ",", "coords_lt", ",", "idx", ",", "(", "batch_channel", ",", "input_h", ",", "input_w", ",", "kernel_n", ")", ")", "vals_rb", "=", "self", ".", "_get_vals_by_coords", "(", "inputs", ",", "coords_rb", ",", "idx", ",", "(", "batch_channel", ",", "input_h", ",", "input_w", ",", "kernel_n", ")", ")", "vals_lb", "=", "self", ".", "_get_vals_by_coords", "(", "inputs", ",", "coords_lb", ",", "idx", ",", "(", "batch_channel", ",", "input_h", ",", "input_w", ",", "kernel_n", ")", ")", "vals_rt", "=", "self", ".", "_get_vals_by_coords", "(", "inputs", ",", "coords_rt", ",", "idx", ",", "(", "batch_channel", ",", "input_h", ",", "input_w", ",", "kernel_n", ")", ")", "coords_offset_lt", "=", "coords", "-", "tf", ".", "cast", "(", "coords_lt", ",", "'float32'", ")", "vals_t", "=", "vals_lt", "+", "(", "vals_rt", "-", "vals_lt", ")", "*", "coords_offset_lt", "[", ":", ",", ":", ",", ":", ",", ":", ",", "0", "]", "vals_b", "=", "vals_lb", "+", "(", "vals_rb", "-", "vals_lb", ")", "*", "coords_offset_lt", "[", ":", ",", ":", ",", ":", ",", ":", ",", "0", "]", "mapped_vals", "=", "vals_t", "+", "(", "vals_b", "-", "vals_t", ")", "*", "coords_offset_lt", "[", ":", ",", ":", ",", ":", ",", ":", ",", "1", "]", "return", "mapped_vals" ]
python
Batch version of tf_map_coordinates Only supports 2D feature maps Parameters ---------- inputs : ``tf.Tensor`` shape = (b*c, h, w) coords : ``tf.Tensor`` shape = (b*c, h, w, n, 2) Returns ------- ``tf.Tensor`` A Tensor with the shape as (b*c, h, w, n)
false
1,977,377
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]: """ Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError """ jsonschema_validate(request, schema) return request
[ "def", "validate", "(", "request", ":", "Union", "[", "Dict", ",", "List", "]", ",", "schema", ":", "dict", ")", "->", "Union", "[", "Dict", ",", "List", "]", ":", "jsonschema_validate", "(", "request", ",", "schema", ")", "return", "request" ]
python
Wraps jsonschema.validate, returning the same object passed in. Args: request: The deserialized-from-json request. schema: The jsonschema schema to validate against. Raises: jsonschema.ValidationError
false
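Because the object is returned unchanged, the call chains naturally; a sketch assuming the jsonschema package is installed:

schema = {'type': 'object', 'required': ['jsonrpc', 'method']}
request = {'jsonrpc': '2.0', 'method': 'ping', 'id': 1}
assert validate(request, schema) is request  # the same object comes back
# validate({'id': 1}, schema) would raise jsonschema.ValidationError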
1,793,375
def _detect(self):
    """ Detect shadowing of built-in symbols

    Recursively visit the calls
    Returns:
        list: {'vuln', 'filename', 'contract', 'func', 'shadow'}

    """
    results = []

    for contract in self.contracts:
        shadows = self.detect_builtin_shadowing_definitions(contract)
        if shadows:
            for shadow in shadows:
                # Obtain components
                shadow_type = shadow[0]
                shadow_object = shadow[1]
                local_variable_parent = shadow[2]

                # Build the path for our info string
                local_variable_path = contract.name + "."
                if local_variable_parent is not None:
                    local_variable_path += local_variable_parent.name + "."
                local_variable_path += shadow_object.name

                info = '{} ({} @ {}) shadows built-in symbol \"{}"\n'.format(local_variable_path,
                                                                             shadow_type,
                                                                             shadow_object.source_mapping_str,
                                                                             shadow_object.name)

                # Generate relevant JSON data for this shadowing definition.
                json = self.generate_json_result(info)
                if shadow_type in [self.SHADOWING_FUNCTION, self.SHADOWING_MODIFIER, self.SHADOWING_EVENT]:
                    self.add_function_to_json(shadow_object, json)
                elif shadow_type in [self.SHADOWING_STATE_VARIABLE, self.SHADOWING_LOCAL_VARIABLE]:
                    self.add_variable_to_json(shadow_object, json)
                results.append(json)

    return results
[ "def", "_detect", "(", "self", ")", ":", "results", "=", "[", "]", "for", "contract", "in", "self", ".", "contracts", ":", "shadows", "=", "self", ".", "detect_builtin_shadowing_definitions", "(", "contract", ")", "if", "shadows", ":", "for", "shadow", "in", "shadows", ":", "shadow_type", "=", "shadow", "[", "0", "]", "shadow_object", "=", "shadow", "[", "1", "]", "local_variable_parent", "=", "shadow", "[", "2", "]", "local_variable_path", "=", "contract", ".", "name", "+", "\".\"", "if", "local_variable_parent", "is", "not", "None", ":", "local_variable_path", "+=", "local_variable_parent", ".", "name", "+", "\".\"", "local_variable_path", "+=", "shadow_object", ".", "name", "info", "=", "'{} ({} @ {}) shadows built-in symbol \\\"{}\"\\n'", ".", "format", "(", "local_variable_path", ",", "shadow_type", ",", "shadow_object", ".", "source_mapping_str", ",", "shadow_object", ".", "name", ")", "json", "=", "self", ".", "generate_json_result", "(", "info", ")", "if", "shadow_type", "in", "[", "self", ".", "SHADOWING_FUNCTION", ",", "self", ".", "SHADOWING_MODIFIER", ",", "self", ".", "SHADOWING_EVENT", "]", ":", "self", ".", "add_function_to_json", "(", "shadow_object", ",", "json", ")", "elif", "shadow_type", "in", "[", "self", ".", "SHADOWING_STATE_VARIABLE", ",", "self", ".", "SHADOWING_LOCAL_VARIABLE", "]", ":", "self", ".", "add_variable_to_json", "(", "shadow_object", ",", "json", ")", "results", ".", "append", "(", "json", ")", "return", "results" ]
python
Detect shadowing of built-in symbols Recursively visit the calls Returns: list: {'vuln', 'filename', 'contract', 'func', 'shadow'}
false
2,145,308
def assert_key_has_value(self, key, caller): """Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None """ assert key, ("key parameter must be specified.") self.assert_key_exists(key, caller) if self[key] is None: raise KeyInContextHasNoValueError( f"context['{key}'] must have a value for {caller}.")
[ "def", "assert_key_has_value", "(", "self", ",", "key", ",", "caller", ")", ":", "assert", "key", ",", "(", "\"key parameter must be specified.\"", ")", "self", ".", "assert_key_exists", "(", "key", ",", "caller", ")", "if", "self", "[", "key", "]", "is", "None", ":", "raise", "KeyInContextHasNoValueError", "(", "f\"context['{key}'] must have a value for {caller}.\"", ")" ]
python
Assert that context contains key which also has a value. Args: key: validate this key exists in context AND has a value that isn't None. caller: string. calling function name - this used to construct error messages Raises: KeyNotInContextError: Key doesn't exist KeyInContextHasNoValueError: context[key] is None AssertionError: if key is None
false
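A behaviour sketch, assuming Context is the dict-like class this method lives on and that the error types come from the same library:

context = Context({'region': 'eu-west-1', 'bucket': None})
context.assert_key_has_value('region', 'mystep')   # passes silently
context.assert_key_has_value('bucket', 'mystep')   # raises KeyInContextHasNoValueError
context.assert_key_has_value('missing', 'mystep')  # raises KeyNotInContextError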
2,606,263
def get_asset_search_session(self, proxy): """Gets an asset search session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetSearchSession) - an AssetSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_search() is false compliance: optional - This method must be implemented if supports_asset_search() is true. """ if not self.supports_asset_search(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.AssetSearchSession(proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
[ "def", "get_asset_search_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_asset_search", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "proxy", "=", "self", ".", "_convert_proxy", "(", "proxy", ")", "try", ":", "session", "=", "sessions", ".", "AssetSearchSession", "(", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")", "except", "AttributeError", ":", "raise", "return", "session" ]
python
Gets an asset search session. arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetSearchSession) - an AssetSearchSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_search() is false compliance: optional - This method must be implemented if supports_asset_search() is true.
false
2,141,011
def optimise_tile(tile_bytes): """ Decode a sequence of bytes as an MVT tile and reorder the string table of its layers and the order of its multilinestrings to save a few bytes. """ t = tile() t.ParseFromString(tile_bytes) for layer in t.layers: sto = StringTableOptimiser() for feature in layer.features: # (multi)linestrings only if feature.type == 2: optimise_multilinestring(feature.geometry) sto.add_tags(feature.tags) sto.update_string_table(layer) return t.SerializeToString()
[ "def", "optimise_tile", "(", "tile_bytes", ")", ":", "t", "=", "tile", "(", ")", "t", ".", "ParseFromString", "(", "tile_bytes", ")", "for", "layer", "in", "t", ".", "layers", ":", "sto", "=", "StringTableOptimiser", "(", ")", "for", "feature", "in", "layer", ".", "features", ":", "if", "feature", ".", "type", "==", "2", ":", "optimise_multilinestring", "(", "feature", ".", "geometry", ")", "sto", ".", "add_tags", "(", "feature", ".", "tags", ")", "sto", ".", "update_string_table", "(", "layer", ")", "return", "t", ".", "SerializeToString", "(", ")" ]
python
Decode a sequence of bytes as an MVT tile and reorder the string table of its layers and the order of its multilinestrings to save a few bytes.
false
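A hypothetical round-trip; 'tile.mvt' is an assumed input path, not taken from the source:

with open('tile.mvt', 'rb') as f:
    original = f.read()
optimised = optimise_tile(original)
print(len(original), '->', len(optimised))  # usually a few bytes smaller
with open('tile.optimised.mvt', 'wb') as f:
    f.write(optimised)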
2,647,587
def soup(self, *args, **kwargs):
    """Parse the currently loaded website.

    Optionally, SoupStrainer can be used to only parse relevant parts of the page.
    This can be particularly useful if the website is complex or performance is a factor.
    <https://www.crummy.com/software/BeautifulSoup/bs4/doc/#soupstrainer>

    Args:
        *args: Optional positional arguments that `SoupStrainer` takes.
        **kwargs: Optional keyword argument that `SoupStrainer` takes.

    Returns:
        A `BeautifulSoup` object.

    Raises:
        NoWebsiteLoadedError: If no website is currently loaded.
        ParsingError: If the current response isn't supported by `bs4`
    """
    if self._url is None:
        raise NoWebsiteLoadedError('website parsing requires a loaded website')

    content_type = self._response.headers.get('Content-Type', '')
    if not any(markup in content_type for markup in ('html', 'xml')):
        raise ParsingError('unsupported content type \'{}\''.format(content_type))

    strainer = SoupStrainer(*args, **kwargs)
    return BeautifulSoup(self._response.content, self.parser, parse_only=strainer)
[ "def", "soup", "(", "self", ",", "*", "args", ",", "**", "kwargs", ")", ":", "if", "self", ".", "_url", "is", "None", ":", "raise", "NoWebsiteLoadedError", "(", "'website parsing requires a loaded website'", ")", "content_type", "=", "self", ".", "_response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "if", "not", "any", "(", "markup", "in", "content_type", "for", "markup", "in", "(", "'html'", ",", "'xml'", ")", ")", ":", "raise", "ParsingError", "(", "'unsupported content type \\'{}\\''", ".", "format", "(", "content_type", ")", ")", "strainer", "=", "SoupStrainer", "(", "*", "args", ",", "**", "kwargs", ")", "return", "BeautifulSoup", "(", "self", ".", "_response", ".", "content", ",", "self", ".", "parser", ",", "parse_only", "=", "strainer", ")" ]
python
Parse the currently loaded website. Optionally, SoupStrainer can be used to only parse relevant parts of the page. This can be particularly useful if the website is complex or performance is a factor. <https://www.crummy.com/software/BeautifulSoup/bs4/doc/#soupstrainer> Args: *args: Optional positional arguments that `SoupStrainer` takes. **kwargs: Optional keyword arguments that `SoupStrainer` takes. Returns: A `BeautifulSoup` object. Raises: NoWebsiteLoadedError: If no website is currently loaded. ParsingError: If the current response isn't supported by `bs4`
false
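The SoupStrainer mechanism the docstring points at, shown stand-alone; the HTML string is illustrative, and this mirrors what soup() does with the loaded response:

from bs4 import BeautifulSoup, SoupStrainer

html = '<html><a href="/a">A</a><p>skip</p><a href="/b">B</a></html>'
# parse_only keeps just the matching tags, as soup() does internally.
links = BeautifulSoup(html, 'html.parser', parse_only=SoupStrainer('a', href=True))
print([a['href'] for a in links.find_all('a')])   # ['/a', '/b']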
1,672,692
def conv2d(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):
  """Adds a 2D convolution followed by an optional batch_norm layer.

  conv2d creates a variable called 'weights', representing the convolutional
  kernel, that is convolved with the input. If `batch_norm_params` is None, a
  second variable called 'biases' is added to the result of the convolution
  operation.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
    stride: a list of length 2: [stride_height, stride_width].
      Can be an int if both strides are the same. Note that presently
      both strides must have the same value.
    padding: one of 'VALID' or 'SAME'.
    activation: activation function.
    stddev: standard deviation of the truncated gaussian weight distribution.
    bias: the initial value of the biases.
    weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If is None don't use it.
    is_training: whether or not the model is in training mode.
    trainable: whether or not the variables should be trainable or not.
    restore: whether or not the variables should be marked for restore.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and its variables should be reused. To be
      able to reuse the layer scope must be given.
  Returns:
    a tensor representing the output of the operation.
  """
  with tf.variable_scope(scope, 'Conv', [inputs], reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w, num_filters_in, num_filters_out]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
                        padding=padding)
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
[ "def", "conv2d", "(", "inputs", ",", "num_filters_out", ",", "kernel_size", ",", "stride", "=", "1", ",", "padding", "=", "'SAME'", ",", "activation", "=", "tf", ".", "nn", ".", "relu", ",", "stddev", "=", "0.01", ",", "bias", "=", "0.0", ",", "weight_decay", "=", "0", ",", "batch_norm_params", "=", "None", ",", "is_training", "=", "True", ",", "trainable", "=", "True", ",", "restore", "=", "True", ",", "scope", "=", "None", ",", "reuse", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "scope", ",", "'Conv'", ",", "[", "inputs", "]", ",", "reuse", "=", "reuse", ")", ":", "kernel_h", ",", "kernel_w", "=", "_two_element_tuple", "(", "kernel_size", ")", "stride_h", ",", "stride_w", "=", "_two_element_tuple", "(", "stride", ")", "num_filters_in", "=", "inputs", ".", "get_shape", "(", ")", "[", "-", "1", "]", "weights_shape", "=", "[", "kernel_h", ",", "kernel_w", ",", "num_filters_in", ",", "num_filters_out", "]", "weights_initializer", "=", "tf", ".", "truncated_normal_initializer", "(", "stddev", "=", "stddev", ")", "l2_regularizer", "=", "None", "if", "weight_decay", "and", "weight_decay", ">", "0", ":", "l2_regularizer", "=", "losses", ".", "l2_regularizer", "(", "weight_decay", ")", "weights", "=", "variables", ".", "variable", "(", "'weights'", ",", "shape", "=", "weights_shape", ",", "initializer", "=", "weights_initializer", ",", "regularizer", "=", "l2_regularizer", ",", "trainable", "=", "trainable", ",", "restore", "=", "restore", ")", "conv", "=", "tf", ".", "nn", ".", "conv2d", "(", "inputs", ",", "weights", ",", "[", "1", ",", "stride_h", ",", "stride_w", ",", "1", "]", ",", "padding", "=", "padding", ")", "if", "batch_norm_params", "is", "not", "None", ":", "with", "scopes", ".", "arg_scope", "(", "[", "batch_norm", "]", ",", "is_training", "=", "is_training", ",", "trainable", "=", "trainable", ",", "restore", "=", "restore", ")", ":", "outputs", "=", "batch_norm", "(", "conv", ",", "**", "batch_norm_params", ")", "else", ":", "bias_shape", "=", "[", "num_filters_out", ",", "]", "bias_initializer", "=", "tf", ".", "constant_initializer", "(", "bias", ")", "biases", "=", "variables", ".", "variable", "(", "'biases'", ",", "shape", "=", "bias_shape", ",", "initializer", "=", "bias_initializer", ",", "trainable", "=", "trainable", ",", "restore", "=", "restore", ")", "outputs", "=", "tf", ".", "nn", ".", "bias_add", "(", "conv", ",", "biases", ")", "if", "activation", ":", "outputs", "=", "activation", "(", "outputs", ")", "return", "outputs" ]
python
Adds a 2D convolution followed by an optional batch_norm layer. conv2d creates a variable called 'weights', representing the convolutional kernel, that is convolved with the input. If `batch_norm_params` is None, a second variable called 'biases' is added to the result of the convolution operation. Args: inputs: a tensor of size [batch_size, height, width, channels]. num_filters_out: the number of output filters. kernel_size: a list of length 2: [kernel_height, kernel_width] of the filters. Can be an int if both values are the same. stride: a list of length 2: [stride_height, stride_width]. Can be an int if both strides are the same. Note that presently both strides must have the same value. padding: one of 'VALID' or 'SAME'. activation: activation function. stddev: standard deviation of the truncated gaussian weight distribution. bias: the initial value of the biases. weight_decay: the weight decay. batch_norm_params: parameters for the batch_norm. If is None don't use it. is_training: whether or not the model is in training mode. trainable: whether or not the variables should be trainable or not. restore: whether or not the variables should be marked for restore. scope: Optional scope for variable_scope. reuse: whether or not the layer and its variables should be reused. To be able to reuse the layer scope must be given. Returns: a tensor representing the output of the operation.
false
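A sketch of stacking two layers with this helper; it assumes the TF1-style graph mode the snippet targets, and the scope names are illustrative:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = conv2d(images, 64, [3, 3], stride=2, scope='conv1')       # conv + bias + relu
net = conv2d(net, 128, 3, batch_norm_params={}, scope='conv2')  # conv + batch_norm + relu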
2,452,450
def remove_response_property(xml_root):
    """Removes response properties if they exist."""
    if xml_root.tag == "testsuites":
        properties = xml_root.find("properties")
        resp_properties = []
        for prop in properties:
            prop_name = prop.get("name", "")
            if "polarion-response-" in prop_name:
                resp_properties.append(prop)
        for resp_property in resp_properties:
            properties.remove(resp_property)
    elif xml_root.tag in ("testcases", "requirements"):
        resp_properties = xml_root.find("response-properties")
        if resp_properties is not None:
            xml_root.remove(resp_properties)
    else:
        raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG)
[ "def", "remove_response_property", "(", "xml_root", ")", ":", "if", "xml_root", ".", "tag", "==", "\"testsuites\"", ":", "properties", "=", "xml_root", ".", "find", "(", "\"properties\"", ")", "resp_properties", "=", "[", "]", "for", "prop", "in", "properties", ":", "prop_name", "=", "prop", ".", "get", "(", "\"name\"", ",", "\"\"", ")", "if", "\"polarion-response-\"", "in", "prop_name", ":", "resp_properties", ".", "append", "(", "prop", ")", "for", "resp_property", "in", "resp_properties", ":", "properties", ".", "remove", "(", "resp_property", ")", "elif", "xml_root", ".", "tag", "in", "(", "\"testcases\"", ",", "\"requirements\"", ")", ":", "resp_properties", "=", "xml_root", ".", "find", "(", "\"response-properties\"", ")", "if", "resp_properties", "is", "not", "None", ":", "xml_root", ".", "remove", "(", "resp_properties", ")", "else", ":", "raise", "Dump2PolarionException", "(", "_NOT_EXPECTED_FORMAT_MSG", ")" ]
python
Removes response properties if they exist.
false
2,193,075
def __init__(self, full_name, first_name='', last_name='', middle_name='', **kwargs): """ Create a Person. Note: the :class:`~billy.scrape.legislators.Legislator` class should be used when dealing with legislators. :param full_name: the person's full name :param first_name: the first name of this legislator (if specified) :param last_name: the last name of this legislator (if specified) :param middle_name: a middle name or initial of this legislator (if specified) """ super(Person, self).__init__('person', **kwargs) self['full_name'] = full_name self['first_name'] = first_name self['last_name'] = last_name self['middle_name'] = middle_name self['suffixes'] = kwargs.get('suffixes', '') self['roles'] = [] self['offices'] = []
[ "def", "__init__", "(", "self", ",", "full_name", ",", "first_name", "=", "''", ",", "last_name", "=", "''", ",", "middle_name", "=", "''", ",", "**", "kwargs", ")", ":", "super", "(", "Person", ",", "self", ")", ".", "__init__", "(", "'person'", ",", "**", "kwargs", ")", "self", "[", "'full_name'", "]", "=", "full_name", "self", "[", "'first_name'", "]", "=", "first_name", "self", "[", "'last_name'", "]", "=", "last_name", "self", "[", "'middle_name'", "]", "=", "middle_name", "self", "[", "'suffixes'", "]", "=", "kwargs", ".", "get", "(", "'suffixes'", ",", "''", ")", "self", "[", "'roles'", "]", "=", "[", "]", "self", "[", "'offices'", "]", "=", "[", "]" ]
python
Create a Person. Note: the :class:`~billy.scrape.legislators.Legislator` class should be used when dealing with legislators. :param full_name: the person's full name :param first_name: the first name of this legislator (if specified) :param last_name: the last name of this legislator (if specified) :param middle_name: a middle name or initial of this legislator (if specified)
false
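A small usage sketch for the Person snippet above; the appended role dict is illustrative, not the billy schema:

p = Person('John Q. Public', first_name='John', last_name='Public',
           middle_name='Q.')
p['roles'].append({'type': 'member', 'chamber': 'upper'})  # illustrative role dict
print(p['full_name'], p['roles'])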
2,362,959
def add_chain(self, var):
    """ Create a new FunctionChain and attach it to $var.
    """
    chain = FunctionChain(var, [])
    self._chains[var] = chain
    self[var] = chain
[ "def", "add_chain", "(", "self", ",", "var", ")", ":", "chain", "=", "FunctionChain", "(", "var", ",", "[", "]", ")", "self", ".", "_chains", "[", "var", "]", "=", "chain", "self", "[", "var", "]", "=", "chain" ]
python
Create a new FunctionChain and attach it to $var.
false
2,450,075
def send_welcome_message(self, user, base_url): """ Send welcome mail with email confirmation link """ if not self.require_confirmation and not self.welcome_message: return # get subject subject = '' subjects = self.email_subjects if self.require_confirmation: subject = 'Welcome, please activate your account!' if 'welcome_confirm' in subjects.keys(): subject = subjects['welcome_confirm'] if not self.require_confirmation: subject = 'Welcome to our site!' if 'welcome' in subjects.keys(): subject = subjects['welcome'] # prepare data sender = current_app.config['MAIL_DEFAULT_SENDER'] recipient = user.email link = '{url}/{link}/'.format( url=base_url.rstrip('/'), link=user.email_link ) data = dict(link=link) # render message if self.require_confirmation: html = render_template('user/mail/account-confirm.html', **data) txt = render_template('user/mail/account-confirm.txt', **data) else: html = render_template('user/mail/welcome.html', **data) txt = render_template('user/mail/welcome.txt', **data) # and send mail.send(Message( subject=subject, recipients=[recipient], body=txt, html=html, sender=sender ))
[ "def", "send_welcome_message", "(", "self", ",", "user", ",", "base_url", ")", ":", "if", "not", "self", ".", "require_confirmation", "and", "not", "self", ".", "welcome_message", ":", "return", "subject", "=", "''", "subjects", "=", "self", ".", "email_subjects", "if", "self", ".", "require_confirmation", ":", "subject", "=", "'Welcome, please activate your account!'", "if", "'welcome_confirm'", "in", "subjects", ".", "keys", "(", ")", ":", "subject", "=", "subjects", "[", "'welcome_confirm'", "]", "if", "not", "self", ".", "require_confirmation", ":", "subject", "=", "'Welcome to our site!'", "if", "'welcome'", "in", "subjects", ".", "keys", "(", ")", ":", "subject", "=", "subjects", "[", "'welcome'", "]", "sender", "=", "current_app", ".", "config", "[", "'MAIL_DEFAULT_SENDER'", "]", "recipient", "=", "user", ".", "email", "link", "=", "'{url}/{link}/'", ".", "format", "(", "url", "=", "base_url", ".", "rstrip", "(", "'/'", ")", ",", "link", "=", "user", ".", "email_link", ")", "data", "=", "dict", "(", "link", "=", "link", ")", "if", "self", ".", "require_confirmation", ":", "html", "=", "render_template", "(", "'user/mail/account-confirm.html'", ",", "**", "data", ")", "txt", "=", "render_template", "(", "'user/mail/account-confirm.txt'", ",", "**", "data", ")", "else", ":", "html", "=", "render_template", "(", "'user/mail/welcome.html'", ",", "**", "data", ")", "txt", "=", "render_template", "(", "'user/mail/welcome.txt'", ",", "**", "data", ")", "mail", ".", "send", "(", "Message", "(", "subject", "=", "subject", ",", "recipients", "=", "[", "recipient", "]", ",", "body", "=", "txt", ",", "html", "=", "html", ",", "sender", "=", "sender", ")", ")" ]
python
Send welcome mail with email confirmation link
false
2,577,279
def combine_std(n, mean, std): """Compute combined standard deviation for subsets. See https://stats.stackexchange.com/questions/43159/\ how-to-calculate-pooled-variance-of-two-groups-given-known-group-variances-\ mean for derivation. Parameters ---------- n : numpy array of sample sizes mean : numpy array of sample means std : numpy array of sample standard deviations """ # Calculate weighted mean mean_tot = np.sum(n*mean)/np.sum(n) var_tot = np.sum(n*(std**2 + mean**2))/np.sum(n) - mean_tot**2 return np.sqrt(var_tot)
[ "def", "combine_std", "(", "n", ",", "mean", ",", "std", ")", ":", "mean_tot", "=", "np", ".", "sum", "(", "n", "*", "mean", ")", "/", "np", ".", "sum", "(", "n", ")", "var_tot", "=", "np", ".", "sum", "(", "n", "*", "(", "std", "**", "2", "+", "mean", "**", "2", ")", ")", "/", "np", ".", "sum", "(", "n", ")", "-", "mean_tot", "**", "2", "return", "np", ".", "sqrt", "(", "var_tot", ")" ]
python
Compute combined standard deviation for subsets. See https://stats.stackexchange.com/questions/43159/\ how-to-calculate-pooled-variance-of-two-groups-given-known-group-variances-\ mean for derivation. Parameters ---------- n : numpy array of sample sizes mean : numpy array of sample means std : numpy array of sample standard deviations
false
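A quick numeric check of the pooled formula above: combining per-group statistics reproduces the standard deviation of the concatenated samples (population-style, matching numpy's default ddof=0):

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0, 7.0])
n = np.array([a.size, b.size])
mean = np.array([a.mean(), b.mean()])
std = np.array([a.std(), b.std()])
# The combined result equals the std of all seven values pooled together.
assert np.isclose(combine_std(n, mean, std), np.concatenate([a, b]).std())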
1,839,776
def apply_analysis_request_partition_interface(portal):
    """Walks through all AR-like partitions registered in the system and
    applies the IAnalysisRequestPartition marker interface to them
    """
    logger.info("Applying 'IAnalysisRequestPartition' marker interface ...")
    query = dict(portal_type="AnalysisRequest", isRootAncestor=False)
    brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
    total = len(brains)
    for num, brain in enumerate(brains):
        if num % 100 == 0:
            logger.info("Applying 'IAnalysisRequestPartition' interface: {}/{}"
                        .format(num, total))
        ar = api.get_object(brain)
        if IAnalysisRequestPartition.providedBy(ar):
            continue
        if ar.getParentAnalysisRequest():
            alsoProvides(ar, IAnalysisRequestPartition)
    commit_transaction(portal)
[ "def", "apply_analysis_request_partition_interface", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Applying 'IAnalysisRequestPartition' marker interface ...\"", ")", "query", "=", "dict", "(", "portal_type", "=", "\"AnalysisRequest\"", ",", "isRootAncestor", "=", "False", ")", "brains", "=", "api", ".", "search", "(", "query", ",", "CATALOG_ANALYSIS_REQUEST_LISTING", ")", "total", "=", "len", "(", "brains", ")", "for", "num", ",", "brain", "in", "enumerate", "(", "brains", ")", ":", "if", "num", "%", "100", "==", "0", ":", "logger", ".", "info", "(", "\"Applying 'IAnalysisRequestPartition' interface: {}/{}\"", ".", "format", "(", "num", ",", "total", ")", ")", "ar", "=", "api", ".", "get_object", "(", "brain", ")", "if", "IAnalysisRequestPartition", ".", "providedBy", "(", "ar", ")", ":", "continue", "if", "ar", ".", "getParentAnalysisRequest", "(", ")", ":", "alsoProvides", "(", "ar", ",", "IAnalysisRequestPartition", ")", "commit_transaction", "(", "portal", ")" ]
python
Walks through all AR-like partitions registered in the system and applies the IAnalysisRequestPartition marker interface to them
false
2,255,613
def sanitize_mount(mount): """Returns a quote-unquote sanitized mount path""" sanitized_mount = mount if sanitized_mount.startswith('/'): sanitized_mount = sanitized_mount[1:] if sanitized_mount.endswith('/'): sanitized_mount = sanitized_mount[:-1] sanitized_mount = sanitized_mount.replace('//', '/') return sanitized_mount
[ "def", "sanitize_mount", "(", "mount", ")", ":", "sanitized_mount", "=", "mount", "if", "sanitized_mount", ".", "startswith", "(", "'/'", ")", ":", "sanitized_mount", "=", "sanitized_mount", "[", "1", ":", "]", "if", "sanitized_mount", ".", "endswith", "(", "'/'", ")", ":", "sanitized_mount", "=", "sanitized_mount", "[", ":", "-", "1", "]", "sanitized_mount", "=", "sanitized_mount", ".", "replace", "(", "'//'", ",", "'/'", ")", "return", "sanitized_mount" ]
python
Returns a quote-unquote sanitized mount path
false
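A few input/output pairs showing what the normalisation in sanitize_mount does:

assert sanitize_mount('/secret/') == 'secret'
assert sanitize_mount('kv//data') == 'kv/data'
assert sanitize_mount('secret') == 'secret'   # already clean, unchanged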
2,061,311
def get(self, key): """ Fetches the value with a given keypath from the given node. Key will be encoded into binary array format first. """ validate_is_bytes(key) return self._get(self.root_hash, encode_to_bin(key))
[ "def", "get", "(", "self", ",", "key", ")", ":", "validate_is_bytes", "(", "key", ")", "return", "self", ".", "_get", "(", "self", ".", "root_hash", ",", "encode_to_bin", "(", "key", ")", ")" ]
python
Fetches the value with a given keypath from the given node. Key will be encoded into binary array format first.
false
1,879,544
def configure(filename=None): """This function gives to the user application a chance to define where configuration file should live. Subsequent calls to this function will have no effect, unless you call :func:`reconfigure`. :param str filename: Full path to configuration file. """ global retry if getattr(configure, '_configured', False): return filename = filename or DEFAULT_CONFIG_FILENAME _ensure_directory(filename) parser = SafeConfigParser() if os.path.isfile(filename): with open(filename, 'r') as fp: parser.readfp(fp) if not parser.has_section(RETRY_SECTION): parser.add_section(RETRY_SECTION) parser.set(RETRY_SECTION, 'max_tries', str(constants.BACKOFF_DEFAULT_MAXTRIES)) parser.set(RETRY_SECTION, 'delay', str(constants.BACKOFF_DEFAULT_DELAY)) parser.set(RETRY_SECTION, 'factor', str(constants.BACKOFF_DEFAULT_FACTOR)) with open(filename, 'wb') as fp: parser.write(fp) retry = RetrySettings( max_tries=parser.getint(RETRY_SECTION, 'max_tries'), delay=parser.getint(RETRY_SECTION, 'delay'), factor=parser.getint(RETRY_SECTION, 'factor')) setattr(configure, '_configured', True) setattr(configure, '_configured_filename', filename)
[ "def", "configure", "(", "filename", "=", "None", ")", ":", "global", "retry", "if", "getattr", "(", "configure", ",", "'_configured'", ",", "False", ")", ":", "return", "filename", "=", "filename", "or", "DEFAULT_CONFIG_FILENAME", "_ensure_directory", "(", "filename", ")", "parser", "=", "SafeConfigParser", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "fp", ":", "parser", ".", "readfp", "(", "fp", ")", "if", "not", "parser", ".", "has_section", "(", "RETRY_SECTION", ")", ":", "parser", ".", "add_section", "(", "RETRY_SECTION", ")", "parser", ".", "set", "(", "RETRY_SECTION", ",", "'max_tries'", ",", "str", "(", "constants", ".", "BACKOFF_DEFAULT_MAXTRIES", ")", ")", "parser", ".", "set", "(", "RETRY_SECTION", ",", "'delay'", ",", "str", "(", "constants", ".", "BACKOFF_DEFAULT_DELAY", ")", ")", "parser", ".", "set", "(", "RETRY_SECTION", ",", "'factor'", ",", "str", "(", "constants", ".", "BACKOFF_DEFAULT_FACTOR", ")", ")", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "fp", ":", "parser", ".", "write", "(", "fp", ")", "retry", "=", "RetrySettings", "(", "max_tries", "=", "parser", ".", "getint", "(", "RETRY_SECTION", ",", "'max_tries'", ")", ",", "delay", "=", "parser", ".", "getint", "(", "RETRY_SECTION", ",", "'delay'", ")", ",", "factor", "=", "parser", ".", "getint", "(", "RETRY_SECTION", ",", "'factor'", ")", ")", "setattr", "(", "configure", ",", "'_configured'", ",", "True", ")", "setattr", "(", "configure", ",", "'_configured_filename'", ",", "filename", ")" ]
python
This function gives to the user application a chance to define where configuration file should live. Subsequent calls to this function will have no effect, unless you call :func:`reconfigure`. :param str filename: Full path to configuration file.
false
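A minimal usage sketch for configure(); the config path is hypothetical, and the module-level retry settings become readable after the first call:

configure('/tmp/myapp/settings.cfg')   # hypothetical config file path
# Subsequent calls are no-ops until reconfigure(); 'retry' now holds the values.
print(retry.max_tries, retry.delay, retry.factor)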
2,095,106
def __init__(self, path, data, binary_buffer=None):
    """
    :param path: GLTF file name loaded
    :param data: Metadata (json loaded)
    :param binary_buffer: Binary buffer when loading glb files
    """
    self.data = data
    self.path = path
    self.asset = GLTFAsset(data['asset'])
    self.materials = [GLTFMaterial(m) for m in data['materials']] if data.get('materials') else []
    self.images = [GLTFImage(i) for i in data['images']] if data.get('images') else []
    self.samplers = [GLTFSampler(s) for s in data['samplers']] if data.get('samplers') else []
    self.textures = [GLTFTexture(t) for t in data['textures']] if data.get('textures') else []
    self.scenes = [GLTFScene(s) for s in data['scenes']] if data.get('scenes') else []
    self.nodes = [GLTFNode(n) for n in data['nodes']] if data.get('nodes') else []
    self.meshes = [GLTFMesh(m) for m in data['meshes']] if data.get('meshes') else []
    self.cameras = [GLTFCamera(c) for c in data['cameras']] if data.get('cameras') else []
    self.buffer_views = [GLTFBufferView(i, v) for i, v in enumerate(data['bufferViews'])] \
        if data.get('bufferViews') else []
    self.buffers = [GLTFBuffer(i, b, self.path.parent) for i, b in enumerate(data['buffers'])] \
        if data.get('buffers') else []
    self.accessors = [GLTFAccessor(i, a) for i, a in enumerate(data['accessors'])] \
        if data.get('accessors') else []

    # glb files can contain buffer 0 data
    if binary_buffer:
        self.buffers[0].data = binary_buffer

    self._link_data()

    self.buffers_exist()
    self.images_exist()
[ "def", "__init__", "(", "self", ",", "path", ",", "data", ",", "binary_buffer", "=", "None", ")", ":", "self", ".", "data", "=", "data", "self", ".", "path", "=", "path", "self", ".", "asset", "=", "GLTFAsset", "(", "data", "[", "'asset'", "]", ")", "self", ".", "materials", "=", "[", "GLTFMaterial", "(", "m", ")", "for", "m", "in", "data", "[", "'materials'", "]", "]", "if", "data", ".", "get", "(", "'materials'", ")", "else", "[", "]", "self", ".", "images", "=", "[", "GLTFImage", "(", "i", ")", "for", "i", "in", "data", "[", "'images'", "]", "]", "if", "data", ".", "get", "(", "'images'", ")", "else", "[", "]", "self", ".", "samplers", "=", "[", "GLTFSampler", "(", "s", ")", "for", "s", "in", "data", "[", "'samplers'", "]", "]", "if", "data", ".", "get", "(", "'samplers'", ")", "else", "[", "]", "self", ".", "textures", "=", "[", "GLTFTexture", "(", "t", ")", "for", "t", "in", "data", "[", "'textures'", "]", "]", "if", "data", ".", "get", "(", "'textures'", ")", "else", "[", "]", "self", ".", "scenes", "=", "[", "GLTFScene", "(", "s", ")", "for", "s", "in", "data", "[", "'scenes'", "]", "]", "if", "data", ".", "get", "(", "'scenes'", ")", "else", "[", "]", "self", ".", "nodes", "=", "[", "GLTFNode", "(", "n", ")", "for", "n", "in", "data", "[", "'nodes'", "]", "]", "if", "data", ".", "get", "(", "'nodes'", ")", "else", "[", "]", "self", ".", "meshes", "=", "[", "GLTFMesh", "(", "m", ")", "for", "m", "in", "data", "[", "'meshes'", "]", "]", "if", "data", ".", "get", "(", "'meshes'", ")", "else", "[", "]", "self", ".", "cameras", "=", "[", "GLTFCamera", "(", "c", ")", "for", "c", "in", "data", "[", "'cameras'", "]", "]", "if", "data", ".", "get", "(", "'cameras'", ")", "else", "[", "]", "self", ".", "buffer_views", "=", "[", "GLTFBufferView", "(", "i", ",", "v", ")", "for", "i", ",", "v", "in", "enumerate", "(", "data", "[", "'bufferViews'", "]", ")", "]", "if", "data", ".", "get", "(", "'bufferViews'", ")", "else", "[", "]", "self", ".", "buffers", "=", "[", "GLTFBuffer", "(", "i", ",", "b", ",", "self", ".", "path", ".", "parent", ")", "for", "i", ",", "b", "in", "enumerate", "(", "data", "[", "'buffers'", "]", ")", "]", "if", "data", ".", "get", "(", "'buffers'", ")", "else", "[", "]", "self", ".", "accessors", "=", "[", "GLTFAccessor", "(", "i", ",", "a", ")", "for", "i", ",", "a", "in", "enumerate", "(", "data", "[", "'accessors'", "]", ")", "]", "if", "data", ".", "get", "(", "'accessors'", ")", "else", "[", "]", "if", "binary_buffer", ":", "self", ".", "buffers", "[", "0", "]", ".", "data", "=", "binary_buffer", "self", ".", "_link_data", "(", ")", "self", ".", "buffers_exist", "(", ")", "self", ".", "images_exist", "(", ")" ]
python
:param path: GLTF file name loaded :param data: Metadata (json loaded) :param binary_buffer: Binary buffer when loading glb files
false
1,670,552
def _num_bytes_to_human_readable(num_bytes): """Returns human readable string of how much memory `num_bytes` fills.""" if num_bytes < (2 ** 10): return "%d B" % num_bytes elif num_bytes < (2 ** 20): return "%.3f KB" % (float(num_bytes) / (2 ** 10)) elif num_bytes < (2 ** 30): return "%.3f MB" % (float(num_bytes) / (2 ** 20)) else: return "%.3f GB" % (float(num_bytes) / (2 ** 30))
[ "def", "_num_bytes_to_human_readable", "(", "num_bytes", ")", ":", "if", "num_bytes", "<", "(", "2", "**", "10", ")", ":", "return", "\"%d B\"", "%", "num_bytes", "elif", "num_bytes", "<", "(", "2", "**", "20", ")", ":", "return", "\"%.3f KB\"", "%", "(", "float", "(", "num_bytes", ")", "/", "(", "2", "**", "10", ")", ")", "elif", "num_bytes", "<", "(", "2", "**", "30", ")", ":", "return", "\"%.3f MB\"", "%", "(", "float", "(", "num_bytes", ")", "/", "(", "2", "**", "20", ")", ")", "else", ":", "return", "\"%.3f GB\"", "%", "(", "float", "(", "num_bytes", ")", "/", "(", "2", "**", "30", ")", ")" ]
python
Returns human readable string of how much memory `num_bytes` fills.
false
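Expected outputs of _num_bytes_to_human_readable at each power-of-two boundary:

assert _num_bytes_to_human_readable(512) == '512 B'
assert _num_bytes_to_human_readable(2048) == '2.000 KB'
assert _num_bytes_to_human_readable(5 * 2 ** 20) == '5.000 MB'
assert _num_bytes_to_human_readable(3 * 2 ** 30) == '3.000 GB'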
2,090,304
def cell_fate(data, groupby='clusters', disconnected_groups=None, self_transitions=False, n_neighbors=None, copy=False): """Computes individual cell endpoints Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. groupby: `str` (default: `'clusters'`) Key to which to assign the fates. disconnected_groups: list of `str` (default: `None`) Which groups to treat as disconnected for fate assignment. n_neighbors: `int` (default: `None`) Number of neighbors to restrict transitions to. copy: `bool` (default: `False`) Return a copy instead of writing to `adata`. Returns ------- Returns or updates `adata` with the attributes cell_fate: `.obs` most likely cell fate for each individual cell cell_fate_confidence: `.obs` confidence of transitioning to the assigned fate """ adata = data.copy() if copy else data logg.info('computing cell fates', r=True) n_neighbors = 10 if n_neighbors is None else n_neighbors _adata = adata.copy() vgraph = VelocityGraph(_adata, n_neighbors=n_neighbors, approx=True, n_recurse_neighbors=1) vgraph.compute_cosines() _adata.uns['velocity_graph'] = vgraph.graph _adata.uns['velocity_graph_neg'] = vgraph.graph_neg T = transition_matrix(_adata, self_transitions=self_transitions) I = np.eye(_adata.n_obs) fate = np.linalg.inv(I - T) if issparse(T): fate = fate.A cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)]) if disconnected_groups is not None: idx = _adata.obs[groupby].isin(disconnected_groups) cell_fates[idx] = _adata.obs[groupby][idx] adata.obs['cell_fate'] = cell_fates adata.obs['cell_fate_confidence'] = fate.max(1) / fate.sum(1) strings_to_categoricals(adata) logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n') logg.hint( 'added\n' ' \'cell_fate\', most likely cell fate (adata.obs)\n' ' \'cell_fate_confidence\', confidence of transitioning to the assigned fate (adata.obs)') return adata if copy else None
[ "def", "cell_fate", "(", "data", ",", "groupby", "=", "'clusters'", ",", "disconnected_groups", "=", "None", ",", "self_transitions", "=", "False", ",", "n_neighbors", "=", "None", ",", "copy", "=", "False", ")", ":", "adata", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "logg", ".", "info", "(", "'computing cell fates'", ",", "r", "=", "True", ")", "n_neighbors", "=", "10", "if", "n_neighbors", "is", "None", "else", "n_neighbors", "_adata", "=", "adata", ".", "copy", "(", ")", "vgraph", "=", "VelocityGraph", "(", "_adata", ",", "n_neighbors", "=", "n_neighbors", ",", "approx", "=", "True", ",", "n_recurse_neighbors", "=", "1", ")", "vgraph", ".", "compute_cosines", "(", ")", "_adata", ".", "uns", "[", "'velocity_graph'", "]", "=", "vgraph", ".", "graph", "_adata", ".", "uns", "[", "'velocity_graph_neg'", "]", "=", "vgraph", ".", "graph_neg", "T", "=", "transition_matrix", "(", "_adata", ",", "self_transitions", "=", "self_transitions", ")", "I", "=", "np", ".", "eye", "(", "_adata", ".", "n_obs", ")", "fate", "=", "np", ".", "linalg", ".", "inv", "(", "I", "-", "T", ")", "if", "issparse", "(", "T", ")", ":", "fate", "=", "fate", ".", "A", "cell_fates", "=", "np", ".", "array", "(", "_adata", ".", "obs", "[", "groupby", "]", "[", "fate", ".", "argmax", "(", "1", ")", "]", ")", "if", "disconnected_groups", "is", "not", "None", ":", "idx", "=", "_adata", ".", "obs", "[", "groupby", "]", ".", "isin", "(", "disconnected_groups", ")", "cell_fates", "[", "idx", "]", "=", "_adata", ".", "obs", "[", "groupby", "]", "[", "idx", "]", "adata", ".", "obs", "[", "'cell_fate'", "]", "=", "cell_fates", "adata", ".", "obs", "[", "'cell_fate_confidence'", "]", "=", "fate", ".", "max", "(", "1", ")", "/", "fate", ".", "sum", "(", "1", ")", "strings_to_categoricals", "(", "adata", ")", "logg", ".", "info", "(", "' finished'", ",", "time", "=", "True", ",", "end", "=", "' '", "if", "settings", ".", "verbosity", ">", "2", "else", "'\\n'", ")", "logg", ".", "hint", "(", "'added\\n'", "' \\'cell_fate\\', most likely cell fate (adata.obs)\\n'", "' \\'cell_fate_confidence\\', confidence of transitioning to the assigned fate (adata.obs)'", ")", "return", "adata", "if", "copy", "else", "None" ]
python
Computes individual cell endpoints Arguments --------- data: :class:`~anndata.AnnData` Annotated data matrix. groupby: `str` (default: `'clusters'`) Key to which to assign the fates. disconnected_groups: list of `str` (default: `None`) Which groups to treat as disconnected for fate assignment. n_neighbors: `int` (default: `None`) Number of neighbors to restrict transitions to. copy: `bool` (default: `False`) Return a copy instead of writing to `adata`. Returns ------- Returns or updates `adata` with the attributes cell_fate: `.obs` most likely cell fate for each individual cell cell_fate_confidence: `.obs` confidence of transitioning to the assigned fate
false
2,232,522
def create(cls, name, certificate): """ Create a new external VPN CA for signing internal gateway certificates. :param str name: Name of VPN CA :param str certificate: file name, path or certificate string. :raises CreateElementFailed: Failed creating cert with reason :rtype: VPNCertificateCA """ json = {'name': name, 'certificate': certificate} return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "certificate", ")", ":", "json", "=", "{", "'name'", ":", "name", ",", "'certificate'", ":", "certificate", "}", "return", "ElementCreator", "(", "cls", ",", "json", ")" ]
python
Create a new external VPN CA for signing internal gateway certificates. :param str name: Name of VPN CA :param str certificate: file name, path or certificate string. :raises CreateElementFailed: Failed creating cert with reason :rtype: VPNCertificateCA
false
1,816,458
def cmd_gimbal_status(self, args): '''show gimbal status''' master = self.master if 'GIMBAL_REPORT' in master.messages: print(master.messages['GIMBAL_REPORT']) else: print("No GIMBAL_REPORT messages")
[ "def", "cmd_gimbal_status", "(", "self", ",", "args", ")", ":", "master", "=", "self", ".", "master", "if", "'GIMBAL_REPORT'", "in", "master", ".", "messages", ":", "print", "(", "master", ".", "messages", "[", "'GIMBAL_REPORT'", "]", ")", "else", ":", "print", "(", "\"No GIMBAL_REPORT messages\"", ")" ]
python
show gimbal status
false
1,839,145
def cylinder(target, throat_diameter='throat.diameter', throat_length='throat.length'): r""" Calculate surface area for a cylindrical throat Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_diameter : string Dictionary key to the throat diameter array. Default is 'throat.diameter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'. """ D = target[throat_diameter] L = target[throat_length] value = _sp.pi*D*L return value
[ "def", "cylinder", "(", "target", ",", "throat_diameter", "=", "'throat.diameter'", ",", "throat_length", "=", "'throat.length'", ")", ":", "D", "=", "target", "[", "throat_diameter", "]", "L", "=", "target", "[", "throat_length", "]", "value", "=", "_sp", ".", "pi", "*", "D", "*", "L", "return", "value" ]
python
Calculate surface area for a cylindrical throat Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_diameter : string Dictionary key to the throat diameter array. Default is 'throat.diameter'. throat_length : string Dictionary key to the throat length array. Default is 'throat.length'.
false
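A worked example of the pi*D*L lateral-area formula in cylinder(); a plain dict stands in for the OpenPNM target object, and the function itself relies on its module's `import scipy as _sp`:

import numpy as np

# A plain dict stands in for the OpenPNM target object.
target = {'throat.diameter': np.array([1.0, 2.0]),
          'throat.length': np.array([10.0, 5.0])}
print(cylinder(target))   # pi*D*L -> [31.4159... 31.4159...]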
2,087,277
def _track_change(self, name, value, formatter=None): """Track that a change happened. This function is only needed for manually recording changes that are not captured by changes to properties of this object that are tracked automatically. Classes that inherit from `emulation_mixin` should use this function to record interesting changes in their internal state or events that happen. The `value` parameter that you pass here should be a native python object best representing what the value of the property that changed is. When saved to disk, it will be converted to a string using: `str(value)`. If you do not like the string that would result from such a call, you can pass a custom formatter that will be called as `formatter(value)` and must return a string. Args: name (str): The name of the property that changed. value (object): The new value of the property. formatter (callable): Optional function to convert value to a string. This function will only be called if track_changes() is enabled and `name` is on the whitelist for properties that should be tracked. If `formatter` is not passed or is None, it will default to `str` """ self._emulation_log.track_change(self._emulation_address, name, value, formatter)
[ "def", "_track_change", "(", "self", ",", "name", ",", "value", ",", "formatter", "=", "None", ")", ":", "self", ".", "_emulation_log", ".", "track_change", "(", "self", ".", "_emulation_address", ",", "name", ",", "value", ",", "formatter", ")" ]
python
Track that a change happened. This function is only needed for manually recording changes that are not captured by changes to properties of this object that are tracked automatically. Classes that inherit from `emulation_mixin` should use this function to record interesting changes in their internal state or events that happen. The `value` parameter that you pass here should be a native python object best representing what the value of the property that changed is. When saved to disk, it will be converted to a string using: `str(value)`. If you do not like the string that would result from such a call, you can pass a custom formatter that will be called as `formatter(value)` and must return a string. Args: name (str): The name of the property that changed. value (object): The new value of the property. formatter (callable): Optional function to convert value to a string. This function will only be called if track_changes() is enabled and `name` is on the whitelist for properties that should be tracked. If `formatter` is not passed or is None, it will default to `str`
false
2,011,870
def getTuple(self): """ Returns the shape of the region as (x, y, w, h) """ return (self.x, self.y, self.w, self.h)
[ "def", "getTuple", "(", "self", ")", ":", "return", "(", "self", ".", "x", ",", "self", ".", "y", ",", "self", ".", "w", ",", "self", ".", "h", ")" ]
python
Returns the shape of the region as (x, y, w, h)
false
2,199,544
def update_account_group_name(self, account_id, group_id, body, **kwargs): # noqa: E501 """Update the group name. # noqa: E501 An endpoint for updating a group name. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/ -d '{\"name\": \"TestGroup2\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.update_account_group_name(account_id, group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str group_id: The ID of the group to be updated. (required) :param GroupUpdateInfo body: Details of the group to be created. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.update_account_group_name_with_http_info(account_id, group_id, body, **kwargs) # noqa: E501 else: (data) = self.update_account_group_name_with_http_info(account_id, group_id, body, **kwargs) # noqa: E501 return data
[ "def", "update_account_group_name", "(", "self", ",", "account_id", ",", "group_id", ",", "body", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "update_account_group_name_with_http_info", "(", "account_id", ",", "group_id", ",", "body", ",", "**", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "update_account_group_name_with_http_info", "(", "account_id", ",", "group_id", ",", "body", ",", "**", "kwargs", ")", "return", "data" ]
python
Update the group name. # noqa: E501 An endpoint for updating a group name. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/policy-groups/{groupID}/ -d '{\"name\": \"TestGroup2\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.update_account_group_name(account_id, group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str group_id: The ID of the group to be updated. (required) :param GroupUpdateInfo body: Details of the group to be created. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
false
2,329,391
def request_url(self, request, proxies): """Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes to proxy URLs. """ proxies = proxies or {} scheme = urlparse(request.url).scheme proxy = proxies.get(scheme) if proxy and scheme != 'https': url = urldefragauth(request.url) else: url = request.path_url return url
[ "def", "request_url", "(", "self", ",", "request", ",", "proxies", ")", ":", "proxies", "=", "proxies", "or", "{", "}", "scheme", "=", "urlparse", "(", "request", ".", "url", ")", ".", "scheme", "proxy", "=", "proxies", ".", "get", "(", "scheme", ")", "if", "proxy", "and", "scheme", "!=", "'https'", ":", "url", "=", "urldefragauth", "(", "request", ".", "url", ")", "else", ":", "url", "=", "request", ".", "path_url", "return", "url" ]
python
Obtain the url to use when making the final request. If the message is being sent through a HTTP proxy, the full URL has to be used. Otherwise, we should only use the path portion of the URL. This should not be called from user code, and is only exposed for use when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`. :param request: The :class:`PreparedRequest <PreparedRequest>` being sent. :param proxies: A dictionary of schemes to proxy URLs.
false
1,774,559
def current(self, fields=None): """Returns dict of current values for all tracked fields""" if fields is None: deferred_fields = self.deferred_fields if deferred_fields: fields = [ field for field in self.fields if field not in deferred_fields ] else: fields = self.fields return dict((f, self.get_field_value(f)) for f in fields)
[ "def", "current", "(", "self", ",", "fields", "=", "None", ")", ":", "if", "fields", "is", "None", ":", "deferred_fields", "=", "self", ".", "deferred_fields", "if", "deferred_fields", ":", "fields", "=", "[", "field", "for", "field", "in", "self", ".", "fields", "if", "field", "not", "in", "deferred_fields", "]", "else", ":", "fields", "=", "self", ".", "fields", "return", "dict", "(", "(", "f", ",", "self", ".", "get_field_value", "(", "f", ")", ")", "for", "f", "in", "fields", ")" ]
python
Returns dict of current values for all tracked fields
false
1,636,646
def check_include_exclude(path_str, include_pat=None, exclude_pat=None): ''' Check for glob or regexp patterns for include_pat and exclude_pat in the 'path_str' string and return True/False conditions as follows. - Default: return 'True' if no include_pat or exclude_pat patterns are supplied - If only include_pat or exclude_pat is supplied: return 'True' if string passes the include_pat test or fails exclude_pat test respectively - If both include_pat and exclude_pat are supplied: return 'True' if include_pat matches AND exclude_pat does not match ''' def _pat_check(path_str, check_pat): if re.match('E@', check_pat): return True if re.search( check_pat[2:], path_str ) else False else: return True if fnmatch.fnmatch( path_str, check_pat ) else False ret = True # -- default true # Before pattern match, check if it is regexp (E@'') or glob(default) if include_pat: if isinstance(include_pat, list): for include_line in include_pat: retchk_include = _pat_check(path_str, include_line) if retchk_include: break else: retchk_include = _pat_check(path_str, include_pat) if exclude_pat: if isinstance(exclude_pat, list): for exclude_line in exclude_pat: retchk_exclude = not _pat_check(path_str, exclude_line) if not retchk_exclude: break else: retchk_exclude = not _pat_check(path_str, exclude_pat) # Now apply include/exclude conditions if include_pat and not exclude_pat: ret = retchk_include elif exclude_pat and not include_pat: ret = retchk_exclude elif include_pat and exclude_pat: ret = retchk_include and retchk_exclude else: ret = True return ret
[ "def", "check_include_exclude", "(", "path_str", ",", "include_pat", "=", "None", ",", "exclude_pat", "=", "None", ")", ":", "def", "_pat_check", "(", "path_str", ",", "check_pat", ")", ":", "if", "re", ".", "match", "(", "'E@'", ",", "check_pat", ")", ":", "return", "True", "if", "re", ".", "search", "(", "check_pat", "[", "2", ":", "]", ",", "path_str", ")", "else", "False", "else", ":", "return", "True", "if", "fnmatch", ".", "fnmatch", "(", "path_str", ",", "check_pat", ")", "else", "False", "ret", "=", "True", "if", "include_pat", ":", "if", "isinstance", "(", "include_pat", ",", "list", ")", ":", "for", "include_line", "in", "include_pat", ":", "retchk_include", "=", "_pat_check", "(", "path_str", ",", "include_line", ")", "if", "retchk_include", ":", "break", "else", ":", "retchk_include", "=", "_pat_check", "(", "path_str", ",", "include_pat", ")", "if", "exclude_pat", ":", "if", "isinstance", "(", "exclude_pat", ",", "list", ")", ":", "for", "exclude_line", "in", "exclude_pat", ":", "retchk_exclude", "=", "not", "_pat_check", "(", "path_str", ",", "exclude_line", ")", "if", "not", "retchk_exclude", ":", "break", "else", ":", "retchk_exclude", "=", "not", "_pat_check", "(", "path_str", ",", "exclude_pat", ")", "if", "include_pat", "and", "not", "exclude_pat", ":", "ret", "=", "retchk_include", "elif", "exclude_pat", "and", "not", "include_pat", ":", "ret", "=", "retchk_exclude", "elif", "include_pat", "and", "exclude_pat", ":", "ret", "=", "retchk_include", "and", "retchk_exclude", "else", ":", "ret", "=", "True", "return", "ret" ]
python
Check for glob or regexp patterns for include_pat and exclude_pat in the 'path_str' string and return True/False conditions as follows. - Default: return 'True' if no include_pat or exclude_pat patterns are supplied - If only include_pat or exclude_pat is supplied: return 'True' if string passes the include_pat test or fails exclude_pat test respectively - If both include_pat and exclude_pat are supplied: return 'True' if include_pat matches AND exclude_pat does not match
false
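Examples of the three modes of check_include_exclude: glob matching by default, an 'E@' prefix switches a pattern to regexp, and supplying both requires the include to match and the exclude to miss:

assert check_include_exclude('srv/salt/top.sls', include_pat='*.sls')
assert not check_include_exclude('srv/salt/top.sls', exclude_pat='E@\\.sls$')
assert check_include_exclude('a/b/c.txt', include_pat='a/*', exclude_pat='*.conf')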
2,425,063
def process_raw_data(cls, raw_data): """Create a new model using raw API response.""" properties = raw_data.get("properties", {}) raw_content = properties.get("ipSecConfiguration", None) if raw_content is not None: ip_sec = IPSecConfiguration.from_raw_data(raw_content) properties["ipSecConfiguration"] = ip_sec ip_addresses = [] for raw_content in properties.get("ipAddresses", []): ip_addresses.append(IPAddress.from_raw_data(raw_content)) properties["ipAddresses"] = ip_addresses routes = [] for raw_content in properties.get("routes", []): routes.append(NetworkInterfaceRoute.from_raw_data(raw_content)) properties["routes"] = routes raw_content = properties.get("statistics", None) if raw_content is not None: statistics = NetworkInterfaceStatistics.from_raw_data( raw_content) properties["statistics"] = statistics raw_content = properties.get("greConfiguration", None) if raw_content is not None: gre_configuration = GREConfiguration.from_raw_data(raw_content) properties["greConfiguration"] = gre_configuration raw_content = properties.get("l3Configuration", None) if raw_content is not None: l3_configuration = L3Configuration.from_raw_data(raw_content) properties["l3Configuration"] = l3_configuration raw_content = properties.get("gateway", None) if raw_content is not None: gateway = Resource.from_raw_data(raw_content) properties["gateway"] = gateway return super(NetworkConnections, cls).process_raw_data(raw_data)
[ "def", "process_raw_data", "(", "cls", ",", "raw_data", ")", ":", "properties", "=", "raw_data", ".", "get", "(", "\"properties\"", ",", "{", "}", ")", "raw_content", "=", "properties", ".", "get", "(", "\"ipSecConfiguration\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "ip_sec", "=", "IPSecConfiguration", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"ipSecConfiguration\"", "]", "=", "ip_sec", "ip_addresses", "=", "[", "]", "for", "raw_content", "in", "properties", ".", "get", "(", "\"ipAddresses\"", ",", "[", "]", ")", ":", "ip_addresses", ".", "append", "(", "IPAddress", ".", "from_raw_data", "(", "raw_content", ")", ")", "properties", "[", "\"ipAddresses\"", "]", "=", "ip_addresses", "routes", "=", "[", "]", "for", "raw_content", "in", "properties", ".", "get", "(", "\"routes\"", ",", "[", "]", ")", ":", "routes", ".", "append", "(", "NetworkInterfaceRoute", ".", "from_raw_data", "(", "raw_content", ")", ")", "properties", "[", "\"routes\"", "]", "=", "routes", "raw_content", "=", "properties", ".", "get", "(", "\"statistics\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "statistics", "=", "NetworkInterfaceStatistics", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"statistics\"", "]", "=", "statistics", "raw_content", "=", "properties", ".", "get", "(", "\"greConfiguration\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "gre_configuration", "=", "GREConfiguration", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"greConfiguration\"", "]", "=", "gre_configuration", "raw_content", "=", "properties", ".", "get", "(", "\"l3Configuration\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "l3_configuration", "=", "L3Configuration", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"l3Configuration\"", "]", "=", "l3_configuration", "raw_content", "=", "properties", ".", "get", "(", "\"gateway\"", ",", "None", ")", "if", "raw_content", "is", "not", "None", ":", "gateway", "=", "Resource", ".", "from_raw_data", "(", "raw_content", ")", "properties", "[", "\"gateway\"", "]", "=", "gateway", "return", "super", "(", "NetworkConnections", ",", "cls", ")", ".", "process_raw_data", "(", "raw_data", ")" ]
python
Create a new model using raw API response.
false
1,869,176
def __init__(self, account_sid, auth_token, workspace_sid, channel_id, **kwargs): """ :param str account_sid: Twilio account sid :param str auth_token: Twilio auth token used to sign the JWT :param str workspace_sid: TaskRouter workspace sid :param str channel_id: TaskRouter channel sid :param kwargs: :param bool allow_web_sockets: shortcut to calling allow_web_sockets, defaults to True :param bool allow_fetch_self: shortcut to calling allow_fetch_self, defaults to True :param bool allow_update_self: shortcut to calling allow_update_self, defaults to False :param bool allow_delete_self: shortcut to calling allow_delete_self, defaults to False :param bool allow_fetch_subresources: shortcut to calling allow_fetch_subresources, defaults to False :param bool allow_update_subresources: shortcut to calling allow_update_subresources, defaults to False :param bool allow_delete_subresources: shortcut to calling allow_delete_subresources, defaults to False :returns a new TaskRouterCapabilityToken with capabilities set depending on kwargs. """ super(TaskRouterCapabilityToken, self).__init__( secret_key=auth_token, issuer=account_sid, algorithm='HS256', nbf=kwargs.get('nbf', Jwt.GENERATE), ttl=kwargs.get('ttl', 3600), valid_until=kwargs.get('valid_until', None), ) self._validate_inputs(account_sid, workspace_sid, channel_id) self.account_sid = account_sid self.auth_token = auth_token self.workspace_sid = workspace_sid self.channel_id = channel_id self.policies = [] if kwargs.get('allow_web_sockets', True): self.allow_web_sockets() if kwargs.get('allow_fetch_self', True): self.allow_fetch_self() if kwargs.get('allow_update_self', False): self.allow_update_self() if kwargs.get('allow_delete_self', False): self.allow_delete_self() if kwargs.get('allow_fetch_subresources', False): self.allow_fetch_subresources() if kwargs.get('allow_delete_subresources', False): self.allow_delete_subresources() if kwargs.get('allow_update_subresources', False): self.allow_update_subresources()
[ "def", "__init__", "(", "self", ",", "account_sid", ",", "auth_token", ",", "workspace_sid", ",", "channel_id", ",", "**", "kwargs", ")", ":", "super", "(", "TaskRouterCapabilityToken", ",", "self", ")", ".", "__init__", "(", "secret_key", "=", "auth_token", ",", "issuer", "=", "account_sid", ",", "algorithm", "=", "'HS256'", ",", "nbf", "=", "kwargs", ".", "get", "(", "'nbf'", ",", "Jwt", ".", "GENERATE", ")", ",", "ttl", "=", "kwargs", ".", "get", "(", "'ttl'", ",", "3600", ")", ",", "valid_until", "=", "kwargs", ".", "get", "(", "'valid_until'", ",", "None", ")", ",", ")", "self", ".", "_validate_inputs", "(", "account_sid", ",", "workspace_sid", ",", "channel_id", ")", "self", ".", "account_sid", "=", "account_sid", "self", ".", "auth_token", "=", "auth_token", "self", ".", "workspace_sid", "=", "workspace_sid", "self", ".", "channel_id", "=", "channel_id", "self", ".", "policies", "=", "[", "]", "if", "kwargs", ".", "get", "(", "'allow_web_sockets'", ",", "True", ")", ":", "self", ".", "allow_web_sockets", "(", ")", "if", "kwargs", ".", "get", "(", "'allow_fetch_self'", ",", "True", ")", ":", "self", ".", "allow_fetch_self", "(", ")", "if", "kwargs", ".", "get", "(", "'allow_update_self'", ",", "False", ")", ":", "self", ".", "allow_update_self", "(", ")", "if", "kwargs", ".", "get", "(", "'allow_delete_self'", ",", "False", ")", ":", "self", ".", "allow_delete_self", "(", ")", "if", "kwargs", ".", "get", "(", "'allow_fetch_subresources'", ",", "False", ")", ":", "self", ".", "allow_fetch_subresources", "(", ")", "if", "kwargs", ".", "get", "(", "'allow_delete_subresources'", ",", "False", ")", ":", "self", ".", "allow_delete_subresources", "(", ")", "if", "kwargs", ".", "get", "(", "'allow_update_subresources'", ",", "False", ")", ":", "self", ".", "allow_update_subresources", "(", ")" ]
python
:param str account_sid: Twilio account sid :param str auth_token: Twilio auth token used to sign the JWT :param str workspace_sid: TaskRouter workspace sid :param str channel_id: TaskRouter channel sid :param kwargs: :param bool allow_web_sockets: shortcut to calling allow_web_sockets, defaults to True :param bool allow_fetch_self: shortcut to calling allow_fetch_self, defaults to True :param bool allow_update_self: shortcut to calling allow_update_self, defaults to False :param bool allow_delete_self: shortcut to calling allow_delete_self, defaults to False :param bool allow_fetch_subresources: shortcut to calling allow_fetch_subresources, defaults to False :param bool allow_update_subresources: shortcut to calling allow_update_subresources, defaults to False :param bool allow_delete_subresources: shortcut to calling allow_delete_subresources, defaults to False :returns a new TaskRouterCapabilityToken with capabilities set depending on kwargs.
false
2,270,079
def print_markdown(data, title=None): """Print data in GitHub-flavoured Markdown format for issues etc. data (dict or list of tuples): Label/value pairs. title (unicode or None): Title, will be rendered as headline 2. """ def excl_value(value): # contains path, i.e. personal info return isinstance(value, basestring_) and Path(value).exists() if isinstance(data, dict): data = list(data.items()) markdown = ["* **{}:** {}".format(l, unicode_(v)) for l, v in data if not excl_value(v)] if title: print("\n## {}".format(title)) print('\n{}\n'.format('\n'.join(markdown)))
[ "def", "print_markdown", "(", "data", ",", "title", "=", "None", ")", ":", "def", "excl_value", "(", "value", ")", ":", "return", "isinstance", "(", "value", ",", "basestring_", ")", "and", "Path", "(", "value", ")", ".", "exists", "(", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "list", "(", "data", ".", "items", "(", ")", ")", "markdown", "=", "[", "\"* **{}:** {}\"", ".", "format", "(", "l", ",", "unicode_", "(", "v", ")", ")", "for", "l", ",", "v", "in", "data", "if", "not", "excl_value", "(", "v", ")", "]", "if", "title", ":", "print", "(", "\"\\n## {}\"", ".", "format", "(", "title", ")", ")", "print", "(", "'\\n{}\\n'", ".", "format", "(", "'\\n'", ".", "join", "(", "markdown", ")", ")", ")" ]
python
Print data in GitHub-flavoured Markdown format for issues etc.

data (dict or list of tuples): Label/value pairs.
title (unicode or None): Title, will be rendered as headline 2.
false
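A minimal, self-contained sketch of the behavior above, assuming Python 3 where the basestring_/unicode_ compat aliases reduce to str; the sample data is hypothetical.

from pathlib import Path

def excl_value(value):
    # Existing filesystem paths are treated as personal info and skipped.
    return isinstance(value, str) and Path(value).exists()

data = {"version": "2.0.12", "Platform": "Linux-5.4-x86_64"}
markdown = ["* **{}:** {}".format(label, value)
            for label, value in data.items() if not excl_value(value)]
print("\n## Info")
print("\n{}\n".format("\n".join(markdown)))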
2,240,454
def encode_file_header(boundary, paramname, filesize, filename=None,
                       filetype=None):
    """Returns the leading data for a multipart/form-data field that contains
    file data.

    ``boundary`` is the boundary string used throughout a single request to
    separate variables.

    ``paramname`` is the name of the variable in this request.

    ``filesize`` is the size of the file data.

    ``filename`` if specified is the filename to give to this field.  This
    field is only useful to the server for determining the original filename.

    ``filetype`` if specified is the MIME type of this file.

    The actual file data should be sent after this header has been sent.
    """
    return MultipartParam(paramname, filesize=filesize, filename=filename,
                          filetype=filetype).encode_hdr(boundary)
[ "def", "encode_file_header", "(", "boundary", ",", "paramname", ",", "filesize", ",", "filename", "=", "None", ",", "filetype", "=", "None", ")", ":", "return", "MultipartParam", "(", "paramname", ",", "filesize", "=", "filesize", ",", "filename", "=", "filename", ",", "filetype", "=", "filetype", ")", ".", "encode_hdr", "(", "boundary", ")" ]
python
Returns the leading data for a multipart/form-data field that contains
file data.

``boundary`` is the boundary string used throughout a single request to
separate variables.

``paramname`` is the name of the variable in this request.

``filesize`` is the size of the file data.

``filename`` if specified is the filename to give to this field.  This
field is only useful to the server for determining the original filename.

``filetype`` if specified is the MIME type of this file.

The actual file data should be sent after this header has been sent.
false
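A hedged, self-contained sketch of what such leading data conceptually looks like on the wire; the exact header set and ordering that encode_hdr emits may differ, and all field values here are hypothetical.

import uuid

boundary = uuid.uuid4().hex
paramname, filename, filetype = "upload", "report.csv", "text/csv"
filesize = 1024  # used by callers to compute the request's overall Content-Length

# Conceptually what precedes the raw file bytes in the request body.
header = (
    "--{b}\r\n"
    "Content-Disposition: form-data; "
    'name="{p}"; filename="{f}"\r\n'
    "Content-Type: {t}\r\n"
    "\r\n"
).format(b=boundary, p=paramname, f=filename, t=filetype)
print(repr(header))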
2,693,292
def adds(self, string):
    """ Add a string child. """
    if isinstance(string, unicode):
        string = string.encode("utf-8")
    self.children.append((STRING, str(string)))
[ "def", "adds", "(", "self", ",", "string", ")", ":", "if", "isinstance", "(", "string", ",", "unicode", ")", ":", "string", "=", "string", ".", "encode", "(", "\"utf-8\"", ")", "self", ".", "children", ".", "append", "(", "(", "STRING", ",", "str", "(", "string", ")", ")", ")" ]
python
Add a string child.
false
1,874,022
def docs(ctx, clean=False, browse=False, watch=False):
    """Build the docs."""
    if clean:
        clean_docs(ctx)
    if watch:
        watch_docs(ctx, browse=browse)
    else:
        build_docs(ctx, browse=browse)
[ "def", "docs", "(", "ctx", ",", "clean", "=", "False", ",", "browse", "=", "False", ",", "watch", "=", "False", ")", ":", "if", "clean", ":", "clean_docs", "(", "ctx", ")", "if", "watch", ":", "watch_docs", "(", "ctx", ",", "browse", "=", "browse", ")", "else", ":", "build_docs", "(", "ctx", ",", "browse", "=", "browse", ")" ]
python
Build the docs.
false
1,970,691
def as_sorted_list(options):
    """
    Returns all options in a list sorted according to their option numbers.

    :return: the sorted list
    """
    if len(options) > 0:
        options = sorted(options, key=lambda o: o.number)
    return options
[ "def", "as_sorted_list", "(", "options", ")", ":", "if", "len", "(", "options", ")", ">", "0", ":", "options", "=", "sorted", "(", "options", ",", "key", "=", "lambda", "o", ":", "o", ".", "number", ")", "return", "options" ]
python
Returns all options in a list sorted according to their option numbers.

:return: the sorted list
false
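A tiny self-contained check of the sort, using a hypothetical stand-in for the option objects.

from collections import namedtuple

Option = namedtuple("Option", "number")

def as_sorted_list(options):
    # Same logic as the snippet above.
    if len(options) > 0:
        options = sorted(options, key=lambda o: o.number)
    return options

print([o.number for o in as_sorted_list([Option(12), Option(3), Option(60)])])
# -> [3, 12, 60]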
2,092,281
def tokenize(self):
    """
    Tokenize the input text

    Scans for instances of perl tags and include directives.
    Tokenization skips line and block comments.

    Returns
    -------
    list
        List of tuples: (typ, start, end)
        Where:
        - typ is "perl" or "incl"
        - start/end mark the first/last char offset of the token
    """
    tokens = []

    token_spec = [
        ('mlc', r'/\*.*?\*/'),
        ('slc', r'//[^\r\n]*?\r?\n'),
        ('perl', r'<%.*?%>'),
        ('incl', r'`include'),
    ]
    tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_spec)
    for m in re.finditer(tok_regex, self.text, re.DOTALL):
        if m.lastgroup in ("incl", "perl"):
            tokens.append((m.lastgroup, m.start(0), m.end(0)-1))

    return tokens
[ "def", "tokenize", "(", "self", ")", ":", "tokens", "=", "[", "]", "token_spec", "=", "[", "(", "'mlc'", ",", "r'/\\*.*?\\*/'", ")", ",", "(", "'slc'", ",", "r'//[^\\r\\n]*?\\r?\\n'", ")", ",", "(", "'perl'", ",", "r'<%.*?%>'", ")", ",", "(", "'incl'", ",", "r'`include'", ")", ",", "]", "tok_regex", "=", "'|'", ".", "join", "(", "'(?P<%s>%s)'", "%", "pair", "for", "pair", "in", "token_spec", ")", "for", "m", "in", "re", ".", "finditer", "(", "tok_regex", ",", "self", ".", "text", ",", "re", ".", "DOTALL", ")", ":", "if", "m", ".", "lastgroup", "in", "(", "\"incl\"", ",", "\"perl\"", ")", ":", "tokens", ".", "append", "(", "(", "m", ".", "lastgroup", ",", "m", ".", "start", "(", "0", ")", ",", "m", ".", "end", "(", "0", ")", "-", "1", ")", ")", "return", "tokens" ]
python
Tokenize the input text

Scans for instances of perl tags and include directives.
Tokenization skips line and block comments.

Returns
-------
list
    List of tuples: (typ, start, end)
    Where:
    - typ is "perl" or "incl"
    - start/end mark the first/last char offset of the token
false
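A self-contained sketch of the same scan on a sample string; the alternation order lets a block comment swallow the perl tag inside it, which is exactly the comment-skipping behavior the docstring promises. The input text is hypothetical.

import re

text = "/* <% ignored: inside comment %> */ `include cfg\n<% body %> end\n"

token_spec = [
    ('mlc',  r'/\*.*?\*/'),         # block comments match first ...
    ('slc',  r'//[^\r\n]*?\r?\n'),  # ... as do line comments ...
    ('perl', r'<%.*?%>'),           # ... so tags inside them never fire
    ('incl', r'`include'),
]
tok_regex = '|'.join('(?P<%s>%s)' % pair for pair in token_spec)

tokens = [(m.lastgroup, m.start(0), m.end(0) - 1)
          for m in re.finditer(tok_regex, text, re.DOTALL)
          if m.lastgroup in ("incl", "perl")]
print(tokens)  # only the `include and the perl tag outside the comment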
1,993,223
def _select_file(user_id, api_path, fields, limit):
    """
    Return a SELECT statement that returns the latest N versions of a file.
    """
    query = select(fields).where(
        _file_where(user_id, api_path),
    ).order_by(
        _file_creation_order(),
    )
    if limit is not None:
        query = query.limit(limit)
    return query
[ "def", "_select_file", "(", "user_id", ",", "api_path", ",", "fields", ",", "limit", ")", ":", "query", "=", "select", "(", "fields", ")", ".", "where", "(", "_file_where", "(", "user_id", ",", "api_path", ")", ",", ")", ".", "order_by", "(", "_file_creation_order", "(", ")", ",", ")", "if", "limit", "is", "not", "None", ":", "query", "=", "query", ".", "limit", "(", "limit", ")", "return", "query" ]
python
Return a SELECT statement that returns the latest N versions of a file.
false
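A hedged illustration of the query shape using SQLAlchemy 1.x Core (matching the select([...]) style above); the table schema and filter columns are hypothetical stand-ins, since the real _file_where/_file_creation_order helpers are not shown here.

from sqlalchemy import (Column, DateTime, Integer, MetaData, String, Table,
                        select)

metadata = MetaData()
files = Table(
    "files", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", String(30)),
    Column("name", String(255)),
    Column("created_at", DateTime),
)

# Latest 5 versions of one user's file, newest first.
query = (
    select([files.c.id, files.c.created_at])
    .where(files.c.user_id == "alice")
    .where(files.c.name == "notebook.ipynb")
    .order_by(files.c.created_at.desc())
    .limit(5)
)
print(query)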
1,738,817
def process_tick(self, tup):
    """Called every slide_interval: emit the tuples currently in the
    window, then expire those older than window_duration.
    """
    curtime = int(time.time())
    window_info = WindowContext(curtime - self.window_duration, curtime)
    tuple_batch = []
    for (tup, tm) in self.current_tuples:
        tuple_batch.append(tup)
    self.processWindow(window_info, tuple_batch)
    self._expire(curtime)
[ "def", "process_tick", "(", "self", ",", "tup", ")", ":", "curtime", "=", "int", "(", "time", ".", "time", "(", ")", ")", "window_info", "=", "WindowContext", "(", "curtime", "-", "self", ".", "window_duration", ",", "curtime", ")", "tuple_batch", "=", "[", "]", "for", "(", "tup", ",", "tm", ")", "in", "self", ".", "current_tuples", ":", "tuple_batch", ".", "append", "(", "tup", ")", "self", ".", "processWindow", "(", "window_info", ",", "tuple_batch", ")", "self", ".", "_expire", "(", "curtime", ")" ]
python
Called every slide_interval: emit the tuples currently in the window, then expire those older than window_duration.
false
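A trimmed, runnable stand-in showing the tick-driven sliding window: every tick emits whatever is currently buffered, then drops tuples older than the window. WindowContext and the 60-second duration are stand-ins for the framework's own types and configuration.

import time
from collections import namedtuple

WindowContext = namedtuple("WindowContext", "start end")

class SlidingWindow(object):
    window_duration = 60  # seconds; hypothetical config value

    def __init__(self):
        self.current_tuples = []  # (tuple, arrival_time) pairs

    def add(self, tup):
        self.current_tuples.append((tup, int(time.time())))

    def _expire(self, curtime):
        cutoff = curtime - self.window_duration
        self.current_tuples = [(t, tm) for (t, tm) in self.current_tuples
                               if tm > cutoff]

    def process_tick(self):
        curtime = int(time.time())
        window = WindowContext(curtime - self.window_duration, curtime)
        batch = [t for (t, _) in self.current_tuples]
        print("window [%d, %d]: %r" % (window.start, window.end, batch))
        self._expire(curtime)

w = SlidingWindow()
w.add("a")
w.add("b")
w.process_tick()  # emits ['a', 'b'], then expires anything too old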
1,629,543
def datasets_upload_file(self, file_name, content_length, last_modified_date_utc, **kwargs):  # noqa: E501
    """Get URL and token to start uploading a data file  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.datasets_upload_file(file_name, content_length, last_modified_date_utc, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str file_name: Dataset file name (required)
    :param int content_length: Content length of file in bytes (required)
    :param int last_modified_date_utc: Last modified date of file in milliseconds since epoch in UTC (required)
    :return: Result
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.datasets_upload_file_with_http_info(file_name, content_length, last_modified_date_utc, **kwargs)  # noqa: E501
    else:
        (data) = self.datasets_upload_file_with_http_info(file_name, content_length, last_modified_date_utc, **kwargs)  # noqa: E501
        return data
[ "def", "datasets_upload_file", "(", "self", ",", "file_name", ",", "content_length", ",", "last_modified_date_utc", ",", "**", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "datasets_upload_file_with_http_info", "(", "file_name", ",", "content_length", ",", "last_modified_date_utc", ",", "**", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "datasets_upload_file_with_http_info", "(", "file_name", ",", "content_length", ",", "last_modified_date_utc", ",", "**", "kwargs", ")", "return", "data" ]
python
Get URL and token to start uploading a data file  # noqa: E501

This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datasets_upload_file(file_name, content_length, last_modified_date_utc, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str file_name: Dataset file name (required)
:param int content_length: Content length of file in bytes (required)
:param int last_modified_date_utc: Last modified date of file in milliseconds since epoch in UTC (required)
:return: Result
         If the method is called asynchronously,
         returns the request thread.
false
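A hedged sketch of preparing the three required arguments; the actual call sits in comments because it needs an authenticated instance of the generated client (the `api` name is an assumption), and the local file is created here just so the sketch runs.

import pathlib

path = pathlib.Path("data.csv")  # hypothetical dataset file
path.write_text("a,b\n1,2\n")

file_name = path.name
content_length = path.stat().st_size                       # bytes
last_modified_date_utc = int(path.stat().st_mtime * 1000)  # ms since epoch

# With `api` an authenticated client instance (assumption):
#   result = api.datasets_upload_file(file_name, content_length,
#                                     last_modified_date_utc)
#   # or non-blocking:
#   thread = api.datasets_upload_file(file_name, content_length,
#                                     last_modified_date_utc, async_req=True)
#   result = thread.get()
print(file_name, content_length, last_modified_date_utc)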
2,086,200
async def stop_tasks(self, address):
    """Clear all tasks pertaining to a tile.

    This coroutine will synchronously cancel all running tasks that were
    attached to the given tile and wait for them to stop before returning.

    Args:
        address (int): The address of the tile we should stop.
    """
    tasks = self._tasks.get(address, [])

    for task in tasks:
        task.cancel()

    # The gather must be awaited; otherwise this coroutine returns before
    # the cancelled tasks have actually stopped.
    await asyncio.gather(*tasks, return_exceptions=True)
    self._tasks[address] = []
[ "async", "def", "stop_tasks", "(", "self", ",", "address", ")", ":", "tasks", "=", "self", ".", "_tasks", ".", "get", "(", "address", ",", "[", "]", ")", "for", "task", "in", "tasks", ":", "task", ".", "cancel", "(", ")", "await", "asyncio", ".", "gather", "(", "*", "tasks", ",", "return_exceptions", "=", "True", ")", "self", ".", "_tasks", "[", "address", "]", "=", "[", "]" ]
python
Clear all tasks pertaining to a tile.

This coroutine will synchronously cancel all running tasks that were
attached to the given tile and wait for them to stop before returning.

Args:
    address (int): The address of the tile we should stop.
false
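A self-contained demonstration of why the gather call must be awaited: cancelling only requests cancellation, and it is the awaited gather (with return_exceptions=True swallowing the CancelledErrors) that guarantees the tasks have actually finished before moving on.

import asyncio

async def worker(n):
    try:
        await asyncio.sleep(3600)
    except asyncio.CancelledError:
        print("worker %d stopped" % n)
        raise

async def main():
    tasks = [asyncio.create_task(worker(i)) for i in range(3)]
    await asyncio.sleep(0)  # let the workers start running

    for task in tasks:
        task.cancel()       # only *requests* cancellation

    # Without this await, main() could return while workers are still
    # unwinding; return_exceptions=True keeps the CancelledErrors from
    # propagating out of the gather.
    await asyncio.gather(*tasks, return_exceptions=True)

asyncio.run(main())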
1,808,225
def peek(library, session, address, width):
    """Read an 8, 16, 32 or 64-bit value from the specified address.

    Corresponds to viPeek* functions of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param address: Source address to read the value.
    :param width: Number of bits to read.
    :return: Data read from bus, return value of the library call.
    :rtype: bytes, :class:`pyvisa.constants.StatusCode`
    """
    if width == 8:
        return peek_8(library, session, address)
    elif width == 16:
        return peek_16(library, session, address)
    elif width == 32:
        return peek_32(library, session, address)
    elif width == 64:
        return peek_64(library, session, address)

    raise ValueError('%s is not a valid size. Valid values are 8, 16, 32 or 64' % width)
[ "def", "peek", "(", "library", ",", "session", ",", "address", ",", "width", ")", ":", "if", "width", "==", "8", ":", "return", "peek_8", "(", "library", ",", "session", ",", "address", ")", "elif", "width", "==", "16", ":", "return", "peek_16", "(", "library", ",", "session", ",", "address", ")", "elif", "width", "==", "32", ":", "return", "peek_32", "(", "library", ",", "session", ",", "address", ")", "elif", "width", "==", "64", ":", "return", "peek_64", "(", "library", ",", "session", ",", "address", ")", "raise", "ValueError", "(", "'%s is not a valid size. Valid values are 8, 16, 32 or 64'", "%", "width", ")" ]
python
Read an 8, 16, 32 or 64-bit value from the specified address.

Corresponds to viPeek* functions of the VISA library.

:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Source address to read the value.
:param width: Number of bits to read.
:return: Data read from bus, return value of the library call.
:rtype: bytes, :class:`pyvisa.constants.StatusCode`
false
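The if/elif ladder above is equivalent to a dictionary dispatch; here is a runnable sketch of that alternative, with stub readers standing in for the real viPeek* wrappers (library and session handles are hypothetical, so None is passed).

def peek_8(library, session, address):  return 0x01, 0
def peek_16(library, session, address): return 0x0102, 0
def peek_32(library, session, address): return 0x01020304, 0
def peek_64(library, session, address): return 0x0102030405060708, 0

_PEEKERS = {8: peek_8, 16: peek_16, 32: peek_32, 64: peek_64}

def peek(library, session, address, width):
    try:
        reader = _PEEKERS[width]
    except KeyError:
        raise ValueError('%s is not a valid size. '
                         'Valid values are 8, 16, 32 or 64' % width)
    return reader(library, session, address)

print(peek(None, None, 0x1000, 16))  # -> (258, 0)

The table form keeps the set of valid widths in one place, at the cost of hiding the dispatch from readers scanning for explicit branches.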
1,831,150
def reason(self, reason):
    """
    Sets the reason of this CreateRefundRequest.
    A description of the reason for the refund.  Default value: `Refund via API`

    :param reason: The reason of this CreateRefundRequest.
    :type: str
    """
    if reason is None:
        raise ValueError("Invalid value for `reason`, must not be `None`")
    if len(reason) > 192:
        raise ValueError("Invalid value for `reason`, length must be less than `192`")

    self._reason = reason
[ "def", "reason", "(", "self", ",", "reason", ")", ":", "if", "reason", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `reason`, must not be `None`\"", ")", "if", "len", "(", "reason", ")", ">", "192", ":", "raise", "ValueError", "(", "\"Invalid value for `reason`, length must be less than `192`\"", ")", "self", ".", "_reason", "=", "reason" ]
python
Sets the reason of this CreateRefundRequest.
A description of the reason for the refund.  Default value: `Refund via API`

:param reason: The reason of this CreateRefundRequest.
:type: str
false
2,026,368
def serializeInstitution(self):
    """
    This method creates a fixture for the "django-tethne_citation_institution" model.

    Returns
    -------
    (institution_data, institution_instance_data, affiliation_data)
        Row lists for institutions, institution instances, and author
        affiliations, ready to be written to fixture files.
    """
    institution_data = []
    institution_instance_data = []
    affiliation_data = []
    affiliation_id = tethnedao.getMaxAffiliationID()
    institution_id = tethnedao.getMaxInstitutionID()
    institution_instance_id = tethnedao.getMaxInstitutionInstanceID()
    for paper in self.corpus:
        if hasattr(paper, 'authorAddress'):
            paper_key = getattr(paper, Serialize.paper_source_map[self.source])
            if type(paper.authorAddress) is unicode:
                institution_id += 1
                institution_instance_id += 1
                institute_literal, authors = SerializeUtility.get_auth_inst(paper.authorAddress)
                institute_row, institute_instance_row = self.get_details_from_inst_literal(
                    institute_literal, institution_id, institution_instance_id, paper_key)
                if institute_row:
                    institution_data.append(institute_row)
                    institution_instance_data.append(institute_instance_row)
                if authors:
                    for author in authors:
                        affiliation_id += 1
                        affiliation_row = self.get_affiliation_details(author, affiliation_id, institute_literal)
                        affiliation_data.append(affiliation_row)
            elif type(paper.authorAddress) is list:
                for address in paper.authorAddress:
                    institution_id += 1
                    institution_instance_id += 1
                    institute_literal, authors = SerializeUtility.get_auth_inst(address)
                    institute_row, institute_instance_row = self.get_details_from_inst_literal(
                        institute_literal, institution_id, institution_instance_id, paper_key)
                    if institute_row:
                        institution_data.append(institute_row)
                        institution_instance_data.append(institute_instance_row)
                    if authors is None:
                        # Carry over the authors seen on the previous address;
                        # assumes the first address always names its authors.
                        authors = prevAuthors
                    for author in authors:
                        affiliation_id += 1
                        affiliation_row = self.get_affiliation_details(author, affiliation_id, institute_literal)
                        affiliation_data.append(affiliation_row)
                    prevAuthors = authors
    return institution_data, institution_instance_data, affiliation_data
[ "def", "serializeInstitution", "(", "self", ")", ":", "institution_data", "=", "[", "]", "institution_instance_data", "=", "[", "]", "affiliation_data", "=", "[", "]", "affiliation_id", "=", "tethnedao", ".", "getMaxAffiliationID", "(", ")", "institution_id", "=", "tethnedao", ".", "getMaxInstitutionID", "(", ")", "institution_instance_id", "=", "tethnedao", ".", "getMaxInstitutionInstanceID", "(", ")", "for", "paper", "in", "self", ".", "corpus", ":", "if", "hasattr", "(", "paper", ",", "'authorAddress'", ")", ":", "paper_key", "=", "getattr", "(", "paper", ",", "Serialize", ".", "paper_source_map", "[", "self", ".", "source", "]", ")", "if", "type", "(", "paper", ".", "authorAddress", ")", "is", "unicode", ":", "institution_id", "+=", "1", "institution_instance_id", "+=", "1", "institute_literal", ",", "authors", "=", "SerializeUtility", ".", "get_auth_inst", "(", "paper", ".", "authorAddress", ")", "institute_row", ",", "institute_instance_row", "=", "self", ".", "get_details_from_inst_literal", "(", "institute_literal", ",", "institution_id", ",", "institution_instance_id", ",", "paper_key", ")", "if", "institute_row", ":", "institution_data", ".", "append", "(", "institute_row", ")", "institution_instance_data", ".", "append", "(", "institute_instance_row", ")", "if", "authors", ":", "for", "author", "in", "authors", ":", "affiliation_id", "+=", "1", "affiliation_row", "=", "self", ".", "get_affiliation_details", "(", "author", ",", "affiliation_id", ",", "institute_literal", ")", "affiliation_data", ".", "append", "(", "affiliation_row", ")", "elif", "type", "(", "paper", ".", "authorAddress", ")", "is", "list", ":", "for", "address", "in", "paper", ".", "authorAddress", ":", "institution_id", "+=", "1", "institution_instance_id", "+=", "1", "institute_literal", ",", "authors", "=", "SerializeUtility", ".", "get_auth_inst", "(", "address", ")", "institute_row", ",", "institute_instance_row", "=", "self", ".", "get_details_from_inst_literal", "(", "institute_literal", ",", "institution_id", ",", "institution_instance_id", ",", "paper_key", ")", "if", "institute_row", ":", "institution_data", ".", "append", "(", "institute_row", ")", "institution_instance_data", ".", "append", "(", "institute_instance_row", ")", "if", "authors", "is", "None", ":", "authors", "=", "prevAuthors", "for", "author", "in", "authors", ":", "affiliation_id", "+=", "1", "affiliation_row", "=", "self", ".", "get_affiliation_details", "(", "author", ",", "affiliation_id", ",", "institute_literal", ")", "affiliation_data", ".", "append", "(", "affiliation_row", ")", "prevAuthors", "=", "authors", "return", "institution_data", ",", "institution_instance_data", ",", "affiliation_data" ]
python
This method creates a fixture for the "django-tethne_citation_institution" model.

Returns
-------
(institution_data, institution_instance_data, affiliation_data)
    Row lists for institutions, institution instances, and author
    affiliations, ready to be written to fixture files.
false
1,812,767
def phase_estimation_circuit(gate: Gate, outputs: Qubits) -> Circuit:
    """Returns a circuit for quantum phase estimation.

    The gate has an eigenvector with eigenvalue e^(i 2 pi phase). To run the
    circuit, the eigenvector should be set on the gate qubits, and the output
    qubits should be in the zero state. After evolution and measurement, the
    output qubits will be (approximately) a binary fraction representation of
    the phase.

    The output registers can be converted with the aid of the
    quantumflow.utils.bitlist_to_int() method.

    >>> import numpy as np
    >>> import quantumflow as qf
    >>> N = 8
    >>> phase = 1/4
    >>> gate = qf.RZ(-4*np.pi*phase, N)
    >>> circ = qf.phase_estimation_circuit(gate, range(N))
    >>> res = circ.run().measure()[0:N]
    >>> est_phase = int(''.join([str(d) for d in res]), 2) / 2**N # To float
    >>> print(phase, est_phase)
    0.25 0.25
    """
    circ = Circuit()
    circ += map_gate(H(), list(zip(outputs)))  # Hadamard on all output qubits

    for cq in reversed(outputs):
        cgate = control_gate(cq, gate)
        circ += cgate
        gate = gate @ gate

    circ += qft_circuit(outputs).H

    return circ
[ "def", "phase_estimation_circuit", "(", "gate", ":", "Gate", ",", "outputs", ":", "Qubits", ")", "->", "Circuit", ":", "circ", "=", "Circuit", "(", ")", "circ", "+=", "map_gate", "(", "H", "(", ")", ",", "list", "(", "zip", "(", "outputs", ")", ")", ")", "for", "cq", "in", "reversed", "(", "outputs", ")", ":", "cgate", "=", "control_gate", "(", "cq", ",", "gate", ")", "circ", "+=", "cgate", "gate", "=", "gate", "@", "gate", "circ", "+=", "qft_circuit", "(", "outputs", ")", ".", "H", "return", "circ" ]
python
Returns a circuit for quantum phase estimation.

The gate has an eigenvector with eigenvalue e^(i 2 pi phase). To run the
circuit, the eigenvector should be set on the gate qubits, and the output
qubits should be in the zero state. After evolution and measurement, the
output qubits will be (approximately) a binary fraction representation of
the phase.

The output registers can be converted with the aid of the
quantumflow.utils.bitlist_to_int() method.

>>> import numpy as np
>>> import quantumflow as qf
>>> N = 8
>>> phase = 1/4
>>> gate = qf.RZ(-4*np.pi*phase, N)
>>> circ = qf.phase_estimation_circuit(gate, range(N))
>>> res = circ.run().measure()[0:N]
>>> est_phase = int(''.join([str(d) for d in res]), 2) / 2**N # To float
>>> print(phase, est_phase)
0.25 0.25
false