Columns: docstring (string, lengths 52 to 499); function (string, lengths 67 to 35.2k); __index_level_0__ (int64, values 52.6k to 1.16M)
Sets default attributes when None.

Args:
    default_attr: dict. Mapping of attribute name to its default value.

def _set_default_attr(self, default_attr):
    for attr, val in six.iteritems(default_attr):
        if getattr(self, attr, None) is None:
            setattr(self, attr, val)
1,141,801
Serialize self as JSON.

Args:
    drop_null: bool, default True. Remove 'empty' attributes. See to_dict.
    camel: bool, default False. Convert keys to camelCase.
    indent: int, default None. See the json built-in.
    sort_keys: bool, default False. See the json built-in.

Returns:
    str: object params.

def to_json(self, drop_null=True, camel=False, indent=None, sort_keys=False):
    return json.dumps(self.to_dict(drop_null, camel),
                      indent=indent, sort_keys=sort_keys)
1,141,802
Serialize self as dict.

Args:
    drop_null: bool, default True. Remove 'empty' attributes.
    camel: bool, default False. Convert keys to camelCase.

Returns:
    dict: object params.

def to_dict(self, drop_null=True, camel=False):
    def to_dict(obj, drop_null, camel):
        if isinstance(obj, (Body, BodyChild)):
            obj = obj.__dict__
        if isinstance(obj, dict):
            data = {}
            for attr, val in six.iteritems(obj):
                if camel:
                    attr = _snake_to_camel(attr)
                valid_null = (isinstance(val, bool) or val == 0 or
                              (val and to_dict(val, drop_null, camel)))
                if not drop_null or (drop_null and valid_null):
                    data[attr] = to_dict(val, drop_null, camel)
            return data
        elif isinstance(obj, list):
            data = []
            for val in obj:
                valid_null = (isinstance(val, bool) or val == 0 or
                              (val and to_dict(val, drop_null, camel)))
                if not drop_null or (drop_null and valid_null):
                    data.append(to_dict(val, drop_null, camel))
            return data
        else:
            return obj

    return to_dict(self, drop_null, camel)
1,141,803
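A hedged sketch of how the two serializers compose; the attribute names below are illustrative, not from the source:

    # `body` is assumed to be an instance of the Body class used above.
    body.favorite_color = "blue"
    body.unused_field = None            # dropped when drop_null=True
    body.to_dict()                      # -> {'favorite_color': 'blue'}
    body.to_json(camel=True)            # -> '{"favoriteColor": "blue"}'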
Parse JSON request, storing content in object attributes.

Args:
    body: str. HTTP request body.

Returns:
    self

def parse(self, body):
    if isinstance(body, six.string_types):
        body = json.loads(body)

    # version
    self.version = body['version']

    # session
    session = body['session']
    self.session.new = session['new']
    self.session.session_id = session['sessionId']
    self.session.application.application_id = \
        session['application']['applicationId']
    if 'attributes' in session and session['attributes']:
        self.session.attributes = session.get('attributes', {})
    else:
        self.session.attributes = {}
    self.session.user.user_id = session['user']['userId']
    self.session.user.access_token = session['user'].get('accessToken', 0)

    # request
    request = body['request']

    # launch request
    if request['type'] == 'LaunchRequest':
        self.request = LaunchRequest()

    # intent request
    elif request['type'] == 'IntentRequest':
        self.request = IntentRequest()
        self.request.intent = Intent()
        intent = request['intent']
        self.request.intent.name = intent['name']
        if 'slots' in intent and intent['slots']:
            for name, slot in six.iteritems(intent['slots']):
                self.request.intent.slots[name] = Slot()
                self.request.intent.slots[name].name = slot['name']
                self.request.intent.slots[name].value = slot.get('value')

    # session ended request
    elif request['type'] == 'SessionEndedRequest':
        self.request = SessionEndedRequest()
        self.request.reason = request['reason']

    # common - keep after specific requests to prevent param overwrite
    self.request.type = request['type']
    self.request.request_id = request['requestId']
    self.request.timestamp = request['timestamp']

    return self
1,141,805
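A minimal request body that exercises the IntentRequest branch of parse above; the JSON layout follows the keys the parser reads, and the container class name is taken from the Skill constructor later in this section:

    request_body = {
        "version": "1.0",
        "session": {
            "new": True,
            "sessionId": "sid-1",
            "application": {"applicationId": "app-1"},
            "user": {"userId": "uid-1"},
        },
        "request": {
            "type": "IntentRequest",
            "requestId": "rid-1",
            "timestamp": "2015-01-01T00:00:00Z",
            "intent": {"name": "HelloIntent", "slots": {}},
        },
    }
    body = RequestBody()
    body.parse(json.dumps(request_body))
    assert body.request.intent.name == "HelloIntent"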
Set response output speech as plain text type.

Args:
    text: str. Response speech used when type is 'PlainText'. Cannot
        exceed 8,000 characters.

def set_speech_text(self, text):
    self.response.outputSpeech.type = 'PlainText'
    self.response.outputSpeech.text = text
1,141,815
Set response output speech as SSML type.

Args:
    ssml: str. Response speech used when type is 'SSML'; should be
        formatted with Speech Synthesis Markup Language. Cannot exceed
        8,000 characters.

def set_speech_ssml(self, ssml):
    self.response.outputSpeech.type = 'SSML'
    self.response.outputSpeech.ssml = ssml
1,141,816
Set response card as simple type. title and content cannot exceed
8,000 characters.

Args:
    title: str. Title of Simple or Standard type card.
    content: str. Content of Simple type card.

def set_card_simple(self, title, content):
    self.response.card.type = 'Simple'
    self.response.card.title = title
    self.response.card.content = content
1,141,817
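These setters (and the reprompt variants that follow) mutate nested response attributes in place, so a handler typically chains a few of them. A hedged sketch, with `resp` assumed to be the response wrapper exposing these methods:

    resp.set_speech_text("Hello!")
    resp.set_card_simple(title="Greeting", content="Hello from the skill.")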
Set response reprompt output speech as plain text type.

Args:
    text: str. Response speech used when type is 'PlainText'. Cannot
        exceed 8,000 characters.

def set_reprompt_text(self, text):
    self.response.reprompt.outputSpeech.type = 'PlainText'
    self.response.reprompt.outputSpeech.text = text
1,141,819
Set response reprompt output speech as SSML type.

Args:
    ssml: str. Response speech used when type is 'SSML'; should be
        formatted with Speech Synthesis Markup Language. Cannot exceed
        8,000 characters.

def set_reprompt_ssml(self, ssml):
    self.response.reprompt.outputSpeech.type = 'SSML'
    self.response.reprompt.outputSpeech.ssml = ssml
1,141,820
Constructor.

Args:
    conf_path (str): Path to the ZEO configuration file. Default
        :attr:`~storage.settings.ZEO_CLIENT_PATH`.
    project_key (str): Project key, which is used for lookups into ZEO.
        Default :attr:`~storage.settings.TREE_PROJECT_KEY`.

def __init__(self, conf_path=ZEO_CLIENT_PATH, project_key=PROJECT_KEY):
    super(self.__class__, self).__init__(
        conf_path=conf_path,
        project_key=project_key
    )

    # tree.name -> tree
    self.name_db_key = "name_db"
    self.name_db = self._get_key_or_create(self.name_db_key)

    # tree.aleph_id -> tree
    self.aleph_id_db_key = "aleph_id_db"
    self.aleph_id_db = self._get_key_or_create(self.aleph_id_db_key)

    # tree.issn -> tree
    self.issn_db_key = "issn_db"
    self.issn_db = self._get_key_or_create(self.issn_db_key)

    # tree.path -> tree
    self.path_db_key = "path_db"
    self.path_db = self._get_key_or_create(self.path_db_key)

    # sub_tree.path -> parent
    self.parent_db_key = "parent_db"
    self.parent_db = self._get_key_or_create(self.parent_db_key)
1,141,933
Add `item` to `db` under `index`. If `index` is not yet in `db`, create
it using `default`.

Args:
    db (dict-obj): Dict-like object used to connect to the database.
    index (str): Index used to look in `db`.
    item (obj): Persistent object, which may be stored in DB.
    default (func/obj): Reference to function/object which will be used
        to create the object under `index`. Default :class:`OOSet`.

def _add_to(self, db, index, item, default=OOSet):
    row = db.get(index, None)
    if row is None:
        row = default()
        db[index] = row
    row.add(item)
1,141,934
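The insert-or-create pattern in _add_to, restated with plain Python types (OOSet swapped for set, purely for illustration):

    db = {}

    def add_to(db, index, item, default=set):
        row = db.get(index)
        if row is None:
            row = default()
            db[index] = row
        row.add(item)

    add_to(db, "issn", "tree-1")
    add_to(db, "issn", "tree-2")
    assert db["issn"] == {"tree-1", "tree-2"}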
Add `tree` into the database.

Args:
    tree (obj): :class:`.Tree` instance.
    parent (ref, default None): Reference to the parent tree. This is
        used for all sub-trees in the recursive call.

def add_tree(self, tree, parent=None):
    if tree.path in self.path_db:
        self.remove_tree_by_path(tree.path)

    # index all indexable attributes
    for index in tree.indexes:
        if not getattr(tree, index):
            continue
        self._add_to(
            getattr(self, index + "_db"),
            getattr(tree, index),
            tree,
        )
    if parent:
        self._add_to(self.parent_db, tree.path, parent)

    # make sure that all sub-trees start with the path of the parent tree
    for sub_tree in tree.sub_trees:
        assert sub_tree.path.startswith(tree.path)

    for sub_tree in tree.sub_trees:
        self.add_tree(sub_tree, parent=tree)
1,141,935
Remove the tree from the database by given `path`.

Args:
    path (str): Path of the tree.

def remove_tree_by_path(self, path):
    with transaction.manager:
        trees = self.path_db.get(path, None)
        if not trees:
            return
        for tree in trees:
            return self._remove_tree(tree)
1,141,936
Remove `item` from `db` at `index`.

Note:
    This function is the inverse of :meth:`._add_to`.

Args:
    db (dict-obj): Dict-like object used to connect to the database.
    index (str): Index used to look in `db`.
    item (obj): Persistent object, which may be stored in DB.

def _remove_from(self, db, index, item):
    with transaction.manager:
        row = db.get(index, None)
        if row is None:
            return
    with transaction.manager:
        if item in row:
            row.remove(item)
    with transaction.manager:
        if not row:
            del db[index]
1,141,937
Really remove the tree identified by the `tree` instance from all
indexes in the database.

Args:
    tree (obj): :class:`.Tree` instance.
    parent (obj, default None): Reference to parent.

def _remove_tree(self, tree, parent=None):
    # remove sub-trees
    for sub_tree in tree.sub_trees:
        self._remove_tree(sub_tree, parent=tree)

    # remove itself
    for index in tree.indexes:
        if not getattr(tree, index):
            continue
        self._remove_from(
            getattr(self, index + "_db"),
            getattr(tree, index),
            tree,
        )
    if parent:
        self._remove_from(self.parent_db, tree.path, parent)

    self.zeo.pack()
1,141,938
Search trees by `issn`.

Args:
    issn (str): :attr:`.Tree.issn` property of :class:`.Tree`.

Returns:
    set: Set of matching :class:`Tree` instances.

def trees_by_issn(self, issn):
    return set(
        self.issn_db.get(issn, OOSet()).keys()
    )
1,141,939
Search trees by `path`.

Args:
    path (str): :attr:`.Tree.path` property of :class:`.Tree`.

Returns:
    set: Set of matching :class:`Tree` instances.

def trees_by_path(self, path):
    return set(
        self.path_db.get(path, OOSet()).keys()
    )
1,141,940
Search trees by `sub_path` using ``Tree.path.startswith(sub_path)``
comparison.

Args:
    sub_path (str): Part of the :attr:`.Tree.path` property of
        :class:`.Tree`.

Returns:
    set: Set of matching :class:`Tree` instances.

def trees_by_subpath(self, sub_path):
    matches = (
        self.path_db[tree_path].keys()
        for tree_path in self.path_db.iterkeys()
        if tree_path.startswith(sub_path)
    )
    return set(sum(matches, []))
1,141,941
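The prefix semantics of trees_by_subpath, shown with a plain dict standing in for path_db:

    path_db = {"/a": ["t1"], "/a/b": ["t2"], "/c": ["t3"]}
    matches = [tree for path, trees in path_db.items()
               if path.startswith("/a") for tree in trees]
    assert sorted(matches) == ["t1", "t2"]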
Get parent for given `tree`, or `alt` if not found.

Args:
    tree (obj): :class:`.Tree` instance, which is already stored in DB.
    alt (obj, default None): Alternative value returned when `tree` is
        not found.

Returns:
    obj: :class:`.Tree` parent to given `tree`.

def get_parent(self, tree, alt=None):
    parent = self.parent_db.get(tree.path)
    if not parent:
        return alt
    return list(parent)[0]
1,141,942
Initialize the object.

Args:
    conf_path (str): See :attr:`conf_path`.
    project_key (str, default None): See :attr:`project_key`. If not
        set, the root of the database is used (this may cause
        performance issues).
    run_asyncore_thread (bool, default True): Run the external asyncore
        thread, which handles connections to the database?

def __init__(self, project_key=None, run_asyncore_thread=True):
    self.project_key = project_key
    self.default_type = OOBTree

    self._root = None        #: Reference to the root of the database.
    self._connection = None  #: Internal handler for the ZEO connection.

    if run_asyncore_thread:
        _init_zeo()

    self._open_connection()
    self._init_zeo_root()
1,142,007
Get and initialize the ZEO root object.

Args:
    attempts (int, default 3): How many times to try, if the connection
        was lost.

def _init_zeo_root(self, attempts=3):
    try:
        db_root = self._connection.root()
    except ConnectionStateError:
        if attempts <= 0:
            raise
        self._open_connection()
        return self._init_zeo_root(attempts=attempts - 1)

    # init the root, if it wasn't already declared
    if self.project_key and self.project_key not in db_root:
        with transaction.manager:
            db_root[self.project_key] = self.default_type()

    self._root = db_root[self.project_key] if self.project_key else db_root
1,142,009
Converts a list of uris to elasticsearch json objects.

Args:
    uri_list: list of uris to convert
    num: the ending count within the batch
    batch_num: the batch number

def _index_sub(self, uri_list, num, batch_num):
    bname = '%s-%s' % (batch_num, num)
    log.debug("batch_num '%s' starting es_json conversion", bname)
    qry_data = get_all_item_data([item[0] for item in uri_list],
                                 self.tstore_conn,
                                 rdfclass=self.rdf_class)
    log.debug("batch_num '%s-%s' query_complete | count: %s",
              batch_num, num, len(qry_data))
    data = RdfDataset(qry_data)
    del qry_data
    log.debug("batch_num '%s-%s' RdfDataset Loaded", batch_num, num)
    for value in uri_list:
        try:
            self.batch_data[batch_num]['main'].append(
                data[value[0]].es_json())
            self.count += 1
        except KeyError:
            pass
    for name, indexer in self.other_indexers.items():
        for item in data.json_qry("$.:%s" % name.pyuri):
            val = item.es_json()
            if val:
                self.batch_data[batch_num][name].append(val)
                self.batch_uris[batch_num].append(item.subject)
    del data
    del uri_list
    log.debug("batch_num '%s-%s' converted to es_json", batch_num, num)
1,142,378
Updates the triplestore with successes of saves and failures of
indexing.

Args:
    es_result: the elasticsearch result list
    action_list: list of elasticsearch action items that were indexed

def _update_triplestore(self, es_result, action_list, **kwargs):
    idx_time = XsdDatetime(datetime.datetime.utcnow())
    uri_keys = {}
    bnode_keys = {}
    for item in action_list:
        try:
            uri_keys[item['_id']] = item['_source']["uri"]
        except KeyError:
            bnode_keys[item['_id']] = item['_id']
    error_dict = {}
    error_bnodes = {}
    if es_result[1]:
        for result in es_result[1]:
            err_item = list(result.values())[0]
            try:
                error_dict[uri_keys.pop(err_item['_id'])] = \
                    XsdString(err_item['error']['reason'])
            except KeyError:
                error_bnodes[bnode_keys.pop(err_item['_id'])] = \
                    XsdString(err_item['error']['reason'])
    if uri_keys:
        # NOTE: the triple-quoted SPARQL template was lost in
        # extraction; only the .format() call survives.
        sparql_good = .format(
            idx_time=idx_time.sparql,
            subj_list="<%s>" % ">\n<".join(uri_keys.values()))
        self.tstore_conn.update_query(sparql_good)
    # Process any errors that were found.
    if not error_dict:
        return
    # Delete all indexing triples related to the error subjects
    sparql_error = .format(
        subj_list="<%s>" % ">\n<".join(error_dict.keys()))
    self.tstore_conn.update_query(sparql_error)
    del sparql_error
    sparql_update = .format(
        idx_time=idx_time.sparql,
        error_list="\n".join(["(<%s> %s)" % (key, val.sparql)
                              for key, val in error_dict.items()]))
    # Create a turtle data stream of the new errors to upload into the
    # triplestore
    self.tstore_conn.update_query(sparql_update)
    del sparql_update
1,142,381
Removes all of the index status triples from the datastore.

Args:
    rdf_class: The class of items to remove the status from

def delete_idx_status(self, rdf_class):
    # NOTE: the triple-quoted SPARQL template was lost in extraction.
    sparql_template =
    rdf_types = [rdf_class.uri] + [item.uri
                                   for item in rdf_class.subclasses]
    sparql = sparql_template.format("\n\t\t".join(rdf_types))
    log.warn("Deleting index status for %s", rdf_class.uri)
    return self.tstore_conn.update_query(sparql)
1,142,382
Creates and returns a tuple of either
(pymongo.mongo_client.MongoClient, pymongo.database.Database) or
(pymongo.mongo_replica_set_client.MongoReplicaSetClient,
pymongo.database.Database), utilizing either a passed-in Flask 'app'
instance, an imported module object, or a dictionary of config values.

Arguments:
    app_or_object_or_dict -- Flask app instance, an object, or a dict

def __new__(cls, app_or_object_or_dict):
    config = {}
    app_name = get_app_name()
    is_flask = False

    # If the object is a flask.app.Flask instance
    if flask_app and isinstance(app_or_object_or_dict, flask_app.Flask):
        config.update(app_or_object_or_dict.config)
        app_name = app_or_object_or_dict.name
        is_flask = True
    # If the object is a dict
    elif isinstance(app_or_object_or_dict, dict):
        config.update(app_or_object_or_dict)
    # Otherwise assume it is some type of object such as a module import
    else:
        for name in dir(app_or_object_or_dict):
            if not name.startswith('_'):
                config[name] = getattr(app_or_object_or_dict, name)

    kwargs = config.get('MONGO_KWARGS', {})

    # Are we operating with a full MONGO_URI or not?
    if 'MONGO_URI' in config:
        # bootstrap configuration from the URL
        parsed = pymongo.uri_parser.parse_uri(config.get('MONGO_URI'))
        if 'database' not in parsed:
            raise ValueError('MongoDB URI does not contain database name')
        config['MONGO_DATABASE'] = parsed['database']
        config['MONGO_USERNAME'] = parsed['username']
        config['MONGO_PASSWORD'] = parsed['password']
        for option, value in parsed['options'].iteritems():
            kwargs.setdefault(option, value)

        # we will use the URI for connecting instead of HOST/PORT
        config.pop('MONGO_HOST', None)
        config.pop('MONGO_PORT', None)
        host = config.get('MONGO_URI')
    # Not operating with a full MONGO_URI
    else:
        config.setdefault('MONGO_HOST', 'localhost')
        config.setdefault('MONGO_PORT', 27017)
        config.setdefault('MONGO_DATABASE', app_name)

        # these don't have defaults
        config.setdefault('MONGO_USERNAME', None)
        config.setdefault('MONGO_PASSWORD', None)

        try:
            port = int(config.get('MONGO_PORT'))
        except ValueError:
            raise TypeError('MONGO_PORT must be an integer')

        host = '%s:%s' % (config.get('MONGO_HOST'),
                          config.get('MONGO_PORT'))

    username = config.get('MONGO_USERNAME')
    password = config.get('MONGO_PASSWORD')
    auth = (username, password)
    if any(auth) and not all(auth):
        raise Exception('Must set both USERNAME and PASSWORD or neither')

    database = config.get('MONGO_DATABASE')
    kwargs['host'] = host

    # Instantiate the correct pymongo client for replica sets or not
    if kwargs.get('replicaSet'):
        cls = pymongo.MongoReplicaSetClient
    else:
        cls = pymongo.MongoClient

    # Instantiate the class using the kwargs obtained from and set
    # in MONGO_KWARGS
    mongo = cls(**kwargs)
    db = mongo[database]

    # Auth with the DB if username and password were provided
    if any(auth):
        db.authenticate(username, password)

    if is_flask:
        if not hasattr(app_or_object_or_dict, 'extensions'):
            app_or_object_or_dict.extensions = {}
        app_or_object_or_dict.extensions['happymongo'] = (mongo, db)

    # Return the tuple
    return mongo, db
1,142,441
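A hedged usage sketch with a plain config dict; the class name and config values are assumptions, not confirmed by the source:

    config = {
        "MONGO_HOST": "localhost",
        "MONGO_PORT": 27017,
        "MONGO_DATABASE": "myapp",
    }
    mongo, db = HapPyMongo(config)  # returns (client, database) per the docstring
    doc = db.things.find_one()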
Scan `path` for viruses using the ``clamd`` antivirus daemon.

Args:
    path (str): Relative or absolute path of the file/directory you
        need to scan.

Returns:
    dict: ``{filename: ("FOUND", "virus type")}`` or a blank dict.

Raises:
    ValueError: When the server is not running.
    AssertionError: When the internal file doesn't exist.

def scan_file(path):
    path = os.path.abspath(path)
    assert os.path.exists(path), "Unreachable file '%s'." % path

    try:
        cd = pyclamd.ClamdUnixSocket()
        cd.ping()
    except pyclamd.ConnectionError:
        cd = pyclamd.ClamdNetworkSocket()
        try:
            cd.ping()
        except pyclamd.ConnectionError:
            raise ValueError(
                "Couldn't connect to clamd server using unix/network socket."
            )

    result = cd.scan_file(path)
    return result if result else {}
1,142,536
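Hedged usage; the return shape follows the docstring above ({filename: ("FOUND", "virus type")} or an empty dict):

    result = scan_file("/tmp/suspect.bin")
    if result:
        for filename, (status, virus) in result.items():
            print(filename, status, virus)
    else:
        print("clean")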
Construct a new node.

Args:
    graph: the bipartite graph this node belongs to.
    name: the name of this node. If not given, the string returned
        from the __str__ method is used.

def __init__(self, graph, name=None):
    if not isinstance(graph, BipartiteGraph):
        raise ValueError(
            "Given graph is not an instance of Bipartite:", graph)
    self._graph = graph
    if name:
        self.name = name
    else:
        self.name = super(_Node, self).__str__()
    self._hash = None
1,142,602
Set summary.

Args:
    v: A new summary. It can be a single number or a list.

def summary(self, v):
    if hasattr(v, "__iter__"):
        self._summary = self._summary_cls(v)
    else:
        self._summary = self._summary_cls(float(v))
1,142,610
Construct a bipartite graph.

Args:
    summary: summary type class; default is AverageSummary.
    alpha: used to compute the weight of anomalous scores; default is 1.
    credibility: credibility class to be used in this graph.
        (Default: WeightedCredibility)
    reviewer: class of reviewers.
    product: class of products.

def __init__(
        self, summary=AverageSummary, alpha=1,
        credibility=WeightedCredibility,
        reviewer=Reviewer, product=Product):
    self.alpha = alpha
    self.graph = nx.DiGraph()
    self.reviewers = []
    self.products = []
    self._summary_cls = summary
    self._review_cls = summary.review_class()
    self.credibility = credibility(self)
    self._reviewer_cls = reviewer
    self._product_cls = product
1,142,612
Create a new reviewer.

Args:
    name: name of the new reviewer.
    anomalous: initial anomalous score. (default: None)

Returns:
    A new reviewer instance.

def new_reviewer(self, name, anomalous=None):
    n = self._reviewer_cls(
        self, name=name, credibility=self.credibility,
        anomalous=anomalous)
    self.graph.add_node(n)
    self.reviewers.append(n)
    return n
1,142,613
Create a new product.

Args:
    name: name of the new product.

Returns:
    A new product instance.

def new_product(self, name):
    n = self._product_cls(self, name, summary_cls=self._summary_cls)
    self.graph.add_node(n)
    self.products.append(n)
    return n
1,142,614
Add a new review from a given reviewer to a given product.

Args:
    reviewer: an instance of Reviewer.
    product: an instance of Product.
    review: a float value.
    date: date the review was issued.

Returns:
    the newly added review object.

Raises:
    TypeError: when the given reviewer or product isn't an instance of
        the reviewer or product class specified when this graph was
        constructed.

def add_review(self, reviewer, product, review, date=None):
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    elif not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    r = self._review_cls(review, date=date)
    self.graph.add_edge(reviewer, product, review=r)
    return r
1,142,615
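An end-to-end sketch of the graph API above; the graph class name is assumed from context:

    g = BipartiteGraph()
    alice = g.new_reviewer("alice")
    book = g.new_product("book")
    review = g.add_review(alice, book, 0.8)
    assert g.retrieve_review(alice, book) is review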
Retrieve products reviewed by a given reviewer.

Args:
    reviewer: A reviewer.

Returns:
    A list of products which the reviewer reviews.

Raises:
    TypeError: when the given reviewer isn't an instance of the
        reviewer class specified when this graph was constructed.

def retrieve_products(self, reviewer):
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    return list(self.graph.successors(reviewer))
1,142,616
Retrieve reviewers who reviewed a given product.

Args:
    product: A product specifying reviewers.

Returns:
    A list of reviewers who review the product.

Raises:
    TypeError: when the given product isn't an instance of the product
        class specified when this graph was constructed.

def retrieve_reviewers(self, product):
    if not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    return list(self.graph.predecessors(product))
1,142,617
Retrieve the review that the given reviewer gave the given product.

Args:
    reviewer: An instance of Reviewer.
    product: An instance of Product.

Returns:
    A review object.

Raises:
    TypeError: when the given reviewer or product isn't an instance of
        the class specified when this graph was constructed.
    KeyError: When the reviewer does not review the product.

def retrieve_review(self, reviewer, product):
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ", expected:", self._reviewer_cls)
    elif not isinstance(product, self._product_cls):
        raise TypeError(
            "Type of given product isn't acceptable:", product,
            ", expected:", self._product_cls)
    try:
        return self.graph[reviewer][product]["review"]
    except TypeError:
        raise KeyError(
            "{0} does not review {1}.".format(reviewer, product))
1,142,618
Compute a weight function for the given reviewers.

Args:
    reviewers: a set of reviewers to compute the weight function for.

Returns:
    a function computing a weight for a reviewer.

def _weight_generator(self, reviewers):
    scores = [r.anomalous_score for r in reviewers]
    mu = np.average(scores)
    sigma = np.std(scores)

    if sigma:
        def w(v):
            try:
                exp = math.exp(self.alpha * (v - mu) / sigma)
                return 1. / (1. + exp)
            except OverflowError:
                return 0.
        return w
    else:
        # sigma == 0 means all reviewers have the same anomalous score.
        # In this case, all reviewers should be treated the same.
        return lambda v: 1.
1,142,620
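The generated weight is a logistic drop-off in the anomalous score, w(v) = 1 / (1 + exp(alpha * (v - mu) / sigma)), so a reviewer at the mean score gets weight 0.5. A quick standalone check:

    import math

    alpha, mu, sigma = 1.0, 0.4, 0.2
    w = lambda v: 1.0 / (1.0 + math.exp(alpha * (v - mu) / sigma))
    assert abs(w(mu) - 0.5) < 1e-12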
Dump credibilities of all products.

Args:
    output: a writable object.

def dump_credibilities(self, output):
    for p in self.products:
        json.dump({
            "product_id": p.name,
            "credibility": self.credibility(p)
        }, output)
        output.write("\n")
1,142,621
Writes a line to a log file.

Arguments:
    namespace {str} -- namespace of the document
    document {dict} -- document to write to the logs

def write_log_file(namespace, document):
    log_timestamp = asctime(gmtime(document[TS]))
    with open("{}{}.{}.log".format(LOG_DIR, namespace, DAY_STRING),
              "a") as f:
        log_string = dumps({
            "datetime": log_timestamp.upper(),
            "namespace": namespace,
            "log": document[LOG_KEY]
        })
        f.write("{}\n".format(log_string))
1,142,687
Retrieve token of TonicDNS API.

Arguments:
    username: TonicDNS API username
    password: TonicDNS API password
    server: TonicDNS API server

def get_token(username, password, server):
    method = 'PUT'
    uri = 'https://' + server + '/authenticate'
    token = ''
    authinfo = {
        "username": username,
        "password": password,
        "local_user": username}
    token = tonicdns_client(uri, method, token, data=authinfo)
    return token
1,142,795
TonicDNS API client.

Arguments:
    uri: TonicDNS API URI
    method: TonicDNS API request method
    token: TonicDNS API authentication token
    data: POST data to TonicDNS API
    keyword: processing keyword of the response
    content: data-exists flag
    raw_flag: True returns the response data, False pretty-prints it

def tonicdns_client(uri, method, token='', data='', keyword='',
                    content='', raw_flag=False):
    res = request(uri, method, data, token)
    if token:
        if keyword == 'serial':
            args = {"token": token, "keyword": keyword,
                    "content": content}
            cur_soa, new_soa = response(uri, method, res, **args)
            return cur_soa, new_soa
        else:
            if content:
                # `content` carries the zone data; pass its domain along
                args = {"token": token, "keyword": keyword,
                        "content": content.get('domain')}
                response(uri, method, res, **args)
            else:
                # get sub command
                args = {"token": token, "keyword": keyword,
                        "raw_flag": raw_flag}
                data = response(uri, method, res, **args)
                return data
    else:
        args = {"token": token, "keyword": keyword}
        token = response(uri, method, res, **args)
        return token
1,142,796
Request to TonicDNS API.

Arguments:
    uri: TonicDNS API URI
    method: TonicDNS API request method
    data: POST data to TonicDNS API
    token: TonicDNS API authentication token

def request(uri, method, data, token=''):
    socket.setdefaulttimeout(__timeout__)
    obj = urllib.build_opener(urllib.HTTPHandler)

    # encoding json
    encoded = json.JSONEncoder(object).encode(data)

    # encoding utf8
    data_utf8 = encoded.encode('utf-8')
    req = urllib.Request(uri, data=data_utf8)

    # When encoded(=data) is False, retrieve data as GET method.
    if encoded:
        req.add_header('Content-Type', 'application/json')
    if token:
        req.add_header('x-authentication-token', token)
    req.get_method = lambda: method
    try:
        res = obj.open(req)
        return res
    except urllib.URLError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        exit(1)
    except urllib.HTTPError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        exit(1)
1,142,797
Response of a tonicdns_client request.

Arguments:
    uri: TonicDNS API URI
    method: TonicDNS API request method
    res: response of the request to TonicDNS API
    token: TonicDNS API token
    keyword: processing keyword
    content: JSON data
    raw_flag: True returns the raw response data, False pretty-prints it

def response(uri, method, res, token='', keyword='', content='',
             raw_flag=False):
    if method == 'GET' or (method == 'PUT' and not token):
        # response body
        data = res.read()
        data_utf8 = data.decode('utf-8')
        if token:
            datas = json.loads(data_utf8)
        else:
            token = json.loads(data_utf8)['hash']
            return token

        if keyword == 'serial':
            # filtering with keyword
            record = search_record(datas, 'SOA')[0]

            # if SOA record, remove unnecessary priority
            del record['priority']

            # override ttl
            record['ttl'] = int(record['ttl'])

            c = JSONConverter(content['domain'])
            new_record = c.get_soa(record, content)
            return record, new_record
        elif keyword:
            # '--search' option of 'get' subcommand
            records = search_record(datas, keyword)
            datas.update({"records": records})

        if uri.split('/')[3] == 'template':
            # 'tmpl_get' subcommand
            if len(uri.split('/')) == 5:
                # when specifying a template identifier
                utils.pretty_print(datas)
            else:
                # when getting all templates
                for data in datas:
                    utils.pretty_print(data)
        else:
            # 'get' subcommand
            if raw_flag:
                return datas
            else:
                if len(uri.split('zone/')) > 1:
                    domain = uri.split('zone/')[1]
                else:
                    domain = ''
                utils.pretty_print(datas, keyword, domain)
    else:
        # response non JSON data
        data = res.read()
        print(data)
1,142,798
Search target JSON -> dictionary.

Arguments:
    datas: dictionary of record data
    keyword: search keyword (default is null). The key target is
        "name", "content", or "type". When the keyword includes a
        comma ",", it is separated into name, type, and content.

def search_record(datas, keyword):
    key_name, key_type, key_content = False, False, False
    if keyword.find(',') > -1:
        if len(keyword.split(',')) == 3:
            key_content = keyword.split(',')[2]
        key_name = keyword.split(',')[0]
        key_type = keyword.split(',')[1]

    result = []
    for record in datas['records']:
        if key_name and key_type:
            if key_content:
                if (record['name'].find(key_name) > -1 and
                        record['type'] == key_type and
                        record['content'].find(key_content) > -1):
                    result.append(record)
            else:
                if (record['name'].find(key_name) > -1 and
                        record['type'] == key_type):
                    result.append(record)
        elif (record['name'].find(keyword) >= 0 or
              record['content'].find(keyword) >= 0 or
              record['type'] == keyword):
            result.append(record)
    return result
1,142,799
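The keyword accepts three shapes: a bare string matched against name, content, or type, and "name,type" or "name,type,content" for conjunctive matching. A small check of both paths:

    datas = {"records": [
        {"name": "www.example.org", "type": "A", "content": "192.0.2.1"},
        {"name": "example.org", "type": "SOA", "content": "ns1 hostmaster"},
    ]}
    assert len(search_record(datas, "A")) == 1
    assert len(search_record(datas, "www,A")) == 1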
Create a response object.

Args:
    message: the message object (it could be any type of object.)
    errors: the errors to attach (it could be any type of object.)
    status (int): the status of the response. Errors should use the
        status that is most appropriate. System failures should set 500.

def __init__(self, message=None, errors=None, status=None):
    self.status = status or 200
    self.message = message
    self.errors = errors
    if self.errors and self.status == 200:
        self.status = 400
1,142,898
Formulates a log file name that incorporates the provided tags. The log
file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``.

Args:
    tags: `list` of tags to append to the log file name. Each tag will
        be '_' delimited and added in the order provided.

def get_logfile_name(tags):
    if not os.path.exists(sd.LOG_DIR):
        os.mkdir(sd.LOG_DIR)
    filename = "log"
    for tag in tags:
        filename += "_{}".format(tag)
    filename += ".txt"
    filename = os.path.join(sd.LOG_DIR, filename)
    return filename
1,142,983
Creates and adds a file handler (`logging.FileHandler` instance) to the
specified logger.

Args:
    logger: The `logging.Logger` instance to add the new file handler
        to.
    level: `str`. The logging level for which the handler accepts
        messages, e.g. `logging.INFO`.
    tags: `list` of tags to append to the log file name. Each tag will
        be '_' delimited and added in the order provided.

def add_file_handler(logger, level, tags):
    f_formatter = logging.Formatter('%(asctime)s:%(name)s:\t%(message)s')
    filename = get_logfile_name(tags)
    handler = logging.FileHandler(filename=filename, mode="a")
    handler.setLevel(level)
    handler.setFormatter(f_formatter)
    logger.addHandler(handler)
1,142,984
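Hedged usage of the two helpers together (tag values illustrative):

    import logging

    logger = logging.getLogger("demo")
    logger.setLevel(logging.INFO)
    add_file_handler(logger, logging.INFO, ["run", "2021"])
    logger.info("pipeline started")   # lands in <LOG_DIR>/log_run_2021.txt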
Sets the widget style to the class defaults.

Parameters:
-----------
colors : str, optional (default lightbg)
    Whether to use the default IPython light background or dark
    background or B&W style.

def set_default_style(self, colors='lightbg'):
    colors = colors.lower()
    if colors == 'lightbg':
        self.style_sheet = styles.default_light_style_sheet
        self.syntax_style = styles.default_light_syntax_style
    elif colors == 'linux':
        self.style_sheet = styles.default_dark_style_sheet
        self.syntax_style = styles.default_dark_syntax_style
    elif colors == 'nocolor':
        self.style_sheet = styles.default_bw_style_sheet
        self.syntax_style = styles.default_bw_syntax_style
    else:
        raise KeyError("No such color scheme: %s" % colors)
1,143,088
Opens a Python script for editing.

Parameters:
-----------
filename : str
    A path to a local system file.
line : int, optional
    A line of interest in the file.

def _edit(self, filename, line=None):
    if self.custom_edit:
        self.custom_edit_requested.emit(filename, line)
    elif not self.editor:
        self._append_plain_text('No default editor available.\n'
            'Specify a GUI text editor in the `IPythonWidget.editor` '
            'configurable to enable the %edit magic')
    else:
        try:
            filename = '"%s"' % filename
            if line and self.editor_line:
                command = self.editor_line.format(filename=filename,
                                                  line=line)
            else:
                try:
                    command = self.editor.format()
                except KeyError:
                    command = self.editor.format(filename=filename)
                else:
                    command += ' ' + filename
        except KeyError:
            self._append_plain_text('Invalid editor command.\n')
        else:
            try:
                Popen(command, shell=True)
            except OSError:
                msg = 'Opening editor with command "%s" failed.\n'
                self._append_plain_text(msg % command)
1,143,089
Saves the passed data to elasticsearch.

Args:
    data: the data dictionary to save
    kwargs:
        id: es id to use / None = auto

def save(self, data, **kwargs):
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)

    es = self.es
    es_index = get2(kwargs, "es_index", self.es_index)
    reset_index = kwargs.get("reset_index", self.reset_index)
    doc_type = kwargs.get("doc_type", self.doc_type)
    op_type = kwargs.get("op_type", self.op_type)
    id_value = kwargs.get("id")
    id_field = kwargs.get("id_field")
    if id_field:
        id_value = data.get(id_field)
    if op_type == "index":
        result = es.index(index=es_index,
                          id=id_value,
                          doc_type=doc_type,
                          body=data)
    elif op_type == "create":
        result = es.create(index=es_index,
                           id=id_value,
                           doc_type=doc_type,
                           body=data)
    lg.debug("Result = \n%s", pp.pformat(result))
    return result
1,143,408
Reads a list of data and replaces the ids with the es id of the item.

Args:
    data_list: list of items to find and replace
    prop: full prop name in es format, i.e. make.id
    lookup_index: the es index to do the lookup against
    lookup_doc_type: the es doc type for the lookup, i.e. device_make
    lookup_field: field to do the lookup against in full es naming
        convention, i.e. make.raw

def _find_ids(self, data_list, prop, lookup_index, lookup_doc_type,
              lookup_field):
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)

    rtn_list = []
    first_time = IsFirst()
    for item in data_list:
        # the Dot class retrieves and sets dictionary values via dot
        # notation
        val = Dot(item).get(prop)
        if val.startswith("#;lookup#;"):
            lookup_val = val.replace("#;lookup#;", "")
            lookup_obj = self.get_item(lookup_val, lookup_field)
            if first_time.first():
                lg.debug(" lookup_obj:\n%s", pp.pformat(lookup_obj))
            if lookup_obj:
                rtn_list.append(Dot(item).set(prop, lookup_obj['_id']))
    return rtn_list
1,143,409
Get nodes matching the query in the response of the GET request sent
to each `url` of `urls`.

Params:
    urls: iterable of strs

def query_multiple_endpoints(urls, query: PyQuery):
    urls = list(urls)
    return _query_multiple_endpoints(urls, query)
1,143,720
Return a rendered field.

Checks to see if the field has a custom widget set. If it does not
have a custom widget, the field type is looked up in the lookup
dictionary to get the default renderer for this field type. If you
wish to not perform any lookup, simply call field() without invoking
this method. This method simply overrides a field's widget value.

Args:
    field (wtforms.Field): The Field to render.

def __call__(self, field, **kwargs):
    if not hasattr(field.widget, '__webwidget__'):
        if field.type in self._lookup:
            field.widget = self._lookup[field.type]
    return field(**kwargs)
1,143,901
Retrieve the versions from PyPI by ``project_name``.

Args:
    project_name (str): The name of the project we wish to retrieve
        the versions of.

Returns:
    list: Of string versions.

def package_releases(self, project_name):
    try:
        return self._connection.package_releases(project_name)
    except Exception as err:
        raise PyPIClientError(err)
1,143,991
Store package information in ``self.packages``.

Args:
    project_name (str): This will be used as the key in the dictionary.
    versions (list): List of ``str`` representing the available
        versions of a project.

def set_package_releases(self, project_name, versions):
    self.packages[project_name] = sorted(versions, reverse=True)
1,143,992
Returns a dict containing zmq configuration arguments parsed from an
xbahn url.

Arguments:
    - u (urlparse.urlparse result)

Returns:
    dict:
    - id (str): connection index key
    - typ_str (str): string representation of the zmq socket type
    - typ (int): zmq socket type (PUB, SUB, REQ, REP, PUSH, PULL)
    - topic (str): subscription topic
    - url (str): url to use with zmq's bind function

def config_from_url(u, **kwargs):
    path = u.path.lstrip("/").split("/")
    if len(path) > 2 or not path:
        raise AssertionError(
            "zmq url format: zmq://<host>:<port>/<pub|sub>/<topic>")
    typ = path[0].upper()
    try:
        topic = path[1]
    except IndexError as _:
        topic = ''
    param = dict(urllib.parse.parse_qsl(u.query))

    # FIXME: should come from schema, maybe zmq+tcp:// ?
    transport = param.get("transport", "tcp")

    _id = "%s-%s-%s-%s" % (typ, topic, transport, u.netloc)
    if kwargs.get("prefix") is not None:
        _id = "%s-%s" % (kwargs.get("prefix"), _id)

    return {
        "id": _id,
        "typ_str": typ,
        "typ": getattr(zmq, typ),
        "topic": topic,
        "transport": transport,
        "url": "%s://%s" % (transport, u.netloc)
    }
1,144,059
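A quick check of the URL shape the assertion above documents (assuming pyzmq is importable, since the function resolves the socket type from zmq):

    from urllib.parse import urlparse

    cfg = config_from_url(urlparse("zmq://127.0.0.1:5555/pub/updates"))
    assert cfg["typ_str"] == "PUB"
    assert cfg["topic"] == "updates"
    assert cfg["url"] == "tcp://127.0.0.1:5555"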
Records the dictionary in the 'blank' attribute based on the
'list_blank' path.

Args:
    current: the current dictionary counts
    dict_obj: the original dictionary object

def _record_blank(self, current, dict_obj):
    if not self.list_blank:
        return
    if self.list_blank not in current:
        self.blank.append(dict_obj)
1,144,089
Cycles through the object and adds in count values.

Args:
    obj: the object to parse
    path: the current path

Kwargs:
    current: a dictionary of counts for the current call
    sub_val: the value to use for subtotal aggregation

def _count_objs(self, obj, path=None, **kwargs):
    sub_val = None
    if isinstance(obj, dict):
        for key, value in obj.items():
            if isinstance(value, (list, dict)):
                kwargs = self._count_objs(value,
                                          self.make_path(key, path),
                                          **kwargs)
            else:
                if self.make_path(key, path) == self.sub_total:
                    sub_val = value
                kwargs['current'] = self._increment_prop(key, path,
                                                         **kwargs)
    elif isinstance(obj, list):
        for item in obj:
            if isinstance(item, (list, dict)):
                kwargs = self._count_objs(item, path, **kwargs)
            else:
                if path == self.sub_total:
                    pdb.set_trace()
                    sub_val = item
                kwargs['current'] = self._increment_prop(path, **kwargs)
    else:
        kwargs['current'] = self._increment_prop(path, **kwargs)
        if path == self.sub_total:
            pdb.set_trace()
            sub_val = obj  # `obj` is the scalar value itself here
    if kwargs.get('sub_val') is None:
        kwargs['sub_val'] = sub_val
    return kwargs
1,144,090
Increments the property path count.

Args:
    prop: the key for the prop
    path: the path to the prop

Kwargs:
    current: dictionary count for the current dictionary

def _increment_prop(self, prop, path=None, **kwargs):
    new_path = self.make_path(prop, path)
    if self.method == 'simple':
        counter = kwargs['current']
    else:
        counter = self.counts
    try:
        counter[new_path] += 1
    except KeyError:
        counter[new_path] = 1
    return counter
1,144,091
Updates counts for the class instance based on the current dictionary
counts.

Args:
    current: current dictionary counts

def update_counts(self, current):
    for item in current:
        try:
            self.counts[item] += 1
        except KeyError:
            self.counts[item] = 1
1,144,092
Updates sub_total counts for the class instance based on the current
dictionary counts.

Args:
    current: current dictionary counts
    sub_key: the key/value to use for the subtotals

def update_subtotals(self, current, sub_key):
    if not self.sub_counts.get(sub_key):
        self.sub_counts[sub_key] = {}
    for item in current:
        try:
            self.sub_counts[sub_key][item] += 1
        except KeyError:
            self.sub_counts[sub_key][item] = 1
1,144,093
Converts a protobuf message to JSON format.

Args:
    message: The protocol buffers message instance to serialize.
    including_default_value_fields: If True, singular primitive fields,
        repeated fields, and map fields will always be serialized. If
        False, only serialize non-empty fields. Singular message fields
        and oneof fields are not affected by this option.

Returns:
    A string containing the JSON formatted protocol buffer message.

def MessageToJson(message, including_default_value_fields=False):
    js = _MessageToJsonObject(message, including_default_value_fields)
    return json.dumps(js, indent=2)
1,144,139
Parses a JSON representation of a protocol message into a message.

Args:
    text: Message JSON representation.
    message: A protocol buffer message to merge into.

Returns:
    The same message passed as argument.

Raises:
    ParseError: On JSON parsing problems.

def Parse(text, message):
    if not isinstance(text, six.text_type):
        text = text.decode('utf-8')
    try:
        if sys.version_info < (2, 7):
            # object_pairs_hook is not supported before python 2.7
            js = json.loads(text)
        else:
            js = json.loads(text, object_pairs_hook=_DuplicateChecker)
    except ValueError as e:
        raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
    _ConvertMessage(js, message)
    return message
1,144,142
Convert field value pairs into a regular message.

Args:
    js: A JSON object to convert the field value pairs.
    message: A regular protocol message to record the data.

Raises:
    ParseError: In case of problems converting.

def _ConvertFieldValuePair(js, message):
    names = []
    message_descriptor = message.DESCRIPTOR
    for name in js:
        try:
            field = message_descriptor.fields_by_camelcase_name.get(
                name, None)
            if not field:
                raise ParseError(
                    'Message type "{0}" has no field named "{1}".'.format(
                        message_descriptor.full_name, name))
            if name in names:
                raise ParseError(
                    'Message type "{0}" should not have multiple "{1}" '
                    'fields.'.format(message.DESCRIPTOR.full_name, name))
            names.append(name)
            # Check no other oneof field is parsed.
            if field.containing_oneof is not None:
                oneof_name = field.containing_oneof.name
                if oneof_name in names:
                    raise ParseError(
                        'Message type "{0}" should not have multiple '
                        '"{1}" oneof fields.'.format(
                            message.DESCRIPTOR.full_name, oneof_name))
                names.append(oneof_name)

            value = js[name]
            if value is None:
                message.ClearField(field.name)
                continue

            # Parse field value.
            if _IsMapEntry(field):
                message.ClearField(field.name)
                _ConvertMapFieldValue(value, message, field)
            elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
                message.ClearField(field.name)
                if not isinstance(value, list):
                    raise ParseError('repeated field {0} must be in [] '
                                     'which is {1}.'.format(name, value))
                if field.cpp_type == \
                        descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                    # Repeated message field.
                    for item in value:
                        sub_message = getattr(message, field.name).add()
                        # None is a null_value in Value.
                        if (item is None and
                                sub_message.DESCRIPTOR.full_name !=
                                'google.protobuf.Value'):
                            raise ParseError('null is not allowed to be '
                                             'used as an element in a '
                                             'repeated field.')
                        _ConvertMessage(item, sub_message)
                else:
                    # Repeated scalar field.
                    for item in value:
                        if item is None:
                            raise ParseError('null is not allowed to be '
                                             'used as an element in a '
                                             'repeated field.')
                        getattr(message, field.name).append(
                            _ConvertScalarFieldValue(item, field))
            elif field.cpp_type == \
                    descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
                sub_message = getattr(message, field.name)
                _ConvertMessage(value, sub_message)
            else:
                setattr(message, field.name,
                        _ConvertScalarFieldValue(value, field))
        except ParseError as e:
            if field and field.containing_oneof is None:
                raise ParseError(
                    'Failed to parse {0} field: {1}'.format(name, e))
            else:
                raise ParseError(str(e))
        except ValueError as e:
            raise ParseError(
                'Failed to parse {0} field: {1}.'.format(name, e))
        except TypeError as e:
            raise ParseError(
                'Failed to parse {0} field: {1}.'.format(name, e))
1,144,143
Convert a JSON object into a message.

Args:
    value: A JSON object.
    message: A WKT or regular protocol message to record the data.

Raises:
    ParseError: In case of convert problems.

def _ConvertMessage(value, message):
    message_descriptor = message.DESCRIPTOR
    full_name = message_descriptor.full_name
    if _IsWrapperMessage(message_descriptor):
        _ConvertWrapperMessage(value, message)
    elif full_name in _WKTJSONMETHODS:
        _WKTJSONMETHODS[full_name][1](value, message)
    else:
        _ConvertFieldValuePair(value, message)
1,144,144
Convert communication namedtuple to this class.

Args:
    pub (obj): :class:`.Publication` instance which will be converted.

Returns:
    obj: :class:`DBPublication` instance.

def from_comm(cls, pub):
    filename = None
    if pub.b64_data:
        filename = cls._save_to_unique_filename(pub)

    return cls(
        title=pub.title,
        author=pub.author,
        pub_year=pub.pub_year,
        isbn=pub.isbn,
        urnnbn=pub.urnnbn,
        uuid=pub.uuid,
        aleph_id=pub.aleph_id,
        producent_id=pub.producent_id,
        is_public=pub.is_public,
        filename=pub.filename,
        is_periodical=pub.is_periodical,
        path=pub.path,
        file_pointer=filename
    )
1,144,303
Goes through all the options in `data` and prompts new values. This
function calls itself recursively if it finds an inner dictionary.

Arguments:
    data -- The dictionary to loop through.
    key_string -- The dot-notated key of the dictionary being checked.

def configure_data(self, data, key_string=''):
    # If there's no keys in this dictionary, we have nothing to do.
    if len(data.keys()) == 0:
        return

    # Split the key string by its dots to find out how deep we are.
    key_parts = key_string.rsplit('.')
    prefix = ' ' * (len(key_parts) - 1)

    # Attempt to get a label for this key string.
    label = self.data.get_label(key_string)

    # If we have a label, write the header for this section.
    if label:
        p = prefix
        if len(p) > 0:
            p += ' '
        self.prompt.header(p + '[' + label + ']')

    # Add to the prefix to indicate options on this level.
    prefix = prefix + ' '

    # If this section has an '_enabled' key, process it first, as it
    # could enable or disable this whole section.
    if '_enabled' in data.keys():
        s = self.data.get_key_string(key_string, '_enabled')

        # Prompt whether to enable this section. Use the existing
        # value as the default.
        data['_enabled'] = self.prompt.bool(
            prefix + self.data.get_label(s), None, data['_enabled'])

        # Return if this section is now disabled.
        if data['_enabled'] is False:
            return

    # Loop through the rest of the dictionary and prompt for every key.
    # If the value is a dictionary, call this function again for the
    # next level.
    for k, v in data.iteritems():
        # If we hit the '_enabled' key, we've already processed it (but
        # still need it in the dictionary for saving). Ignore it.
        if k == '_enabled':
            continue

        # Get the type of the value at this key, and the dot-noted
        # format of this key.
        t = type(v)
        s = self.data.get_key_string(key_string, k)

        # If the value type is a dictionary, call this function.
        if t is dict:
            self.configure_data(v, s)
        # Otherwise, parse the value.
        else:
            label = prefix + self.data.get_label(s)
            self.parse_value(data, label, s, None, v)
1,144,344
Parses a single value and sets it in an inner dictionary.

Arguments:
    inner_dict -- The dictionary containing the value to set.
    label -- The label to show for the prompt.
    key -- The key in the dictionary to set the value for.
    value -- The value to set. If there is a value, don't prompt for
        one.
    default -- The default value in the prompt. This is taken from the
        schema and defines the type of the value.

def parse_value(self, inner_dict, label, key, value, default):
    t = type(default)
    if t is dict:
        return

    select = self.data.get_select(key)
    k = key.split('.')[-1]

    if select:
        inner_dict[k] = self.prompt.select(label, select, value,
                                           default=default)
    # If the value type is a boolean, prompt a boolean.
    elif t is bool:
        inner_dict[k] = self.prompt.bool(label, value, default=default)
    # If the value is an int, prompt an int.
    elif t is int:
        inner_dict[k] = self.prompt.int(label, value, default=default)
    # If someone has put a list in data, we default it to an empty
    # string. If it had come from the schema, it would already be a
    # string.
    elif t is list:
        inner_dict[k] = self.prompt.prompt(label + ':', value,
                                           default='')
    # If none of the above are true, it's a string.
    else:
        inner_dict[k] = self.prompt.prompt(label + ':', value,
                                           default=default)
1,144,346
Sets a single value in a preconfigured data file.

Arguments:
    key -- The full dot-notated key to set the value for.
    value -- The value to set.

def set(self, key, value):
    d = self.data.data
    keys = key.split('.')
    latest = keys.pop()
    for k in keys:
        d = d.setdefault(k, {})
    schema = Schema().load(self.schema_file)
    self.data.internal = schema.internal
    self.parse_value(d, '', key, value, schema.get(key))
    self.data.save(self.data_file)
1,144,347
Internal load function. Creates the object and returns it.

Arguments:
    data_file -- The filename to load.

def _load(self, data_file):
    # Load the data from a file.
    try:
        data = Schema().load(data_file)
    except (Exception, IOError, ValueError) as e:
        raise e
    return data
1,144,348
Loads a data file and sets it to self.data.

Arguments:
    data_file -- The filename to load.

def load(self, data_file=None):
    if not data_file:
        data_file = ''
    elif data_file[-1] != '/':
        data_file += '/'
    if data_file[-6:] != self.lazy_folder:
        data_file += self.lazy_folder
    data_file += self.data_filename
    self.data = self._load(data_file)
    return self
1,144,349
Add all keyword arguments to self.args.

Args:
    **defaults: keys and values representing dictionary keys and values

def set_defaults(self, **defaults):
    try:
        defaults_items = defaults.iteritems()
    except AttributeError:
        defaults_items = defaults.items()
    for key, val in defaults_items:
        if key not in self.args.keys():
            self.args[key] = val
1,144,411
Set more arguments in self.args.

Args:
    **kwargs: keys and values representing dictionary keys and values

def set_args(self, **kwargs):
    try:
        kwargs_items = kwargs.iteritems()
    except AttributeError:
        kwargs_items = kwargs.items()
    for key, val in kwargs_items:
        self.args[key] = val
1,144,412
Method to get the bestfit line using the defined self.bestfit_func
method.

Args:
    x_min: scalar, default=min(x)
        minimum x value of the line
    x_max: scalar, default=max(x)
        maximum x value of the line
    resolution: int, default=1000
        how many steps between x_min and x_max

Returns:
    [bestfit_x, bestfit_y]

def get_bestfit_line(self, x_min=None, x_max=None, resolution=None):
    x = self.args["x"]
    if x_min is None:
        x_min = min(x)
    if x_max is None:
        x_max = max(x)
    if resolution is None:
        resolution = self.args.get("resolution", 1000)
    bestfit_x = np.linspace(x_min, x_max, resolution)
    return [bestfit_x, self.bestfit_func(bestfit_x)]
1,144,414
Get Root Mean Square Error using self.bestfit_func.

Args:
    data_x: array_like, default=x
        x values used to determine the RMSE; pass a subset to evaluate
        only a section of the data
    data_y: array_like, default=y
        y values used to determine the RMSE

def get_rmse(self, data_x=None, data_y=None):
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    rmse_y = self.bestfit_func(data_x)
    return np.sqrt(np.mean((rmse_y - data_y) ** 2))
1,144,415
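A standalone numeric check of the RMSE computation used above:

    import numpy as np

    data_y = np.array([1.0, 2.0, 3.0])
    pred_y = np.array([1.5, 2.0, 2.5])        # stand-in for bestfit_func(x)
    rmse = np.sqrt(np.mean((pred_y - data_y) ** 2))
    assert abs(rmse - np.sqrt(0.5 / 3)) < 1e-12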
Get Mean Absolute Error using self.bestfit_func.

Args:
    data_x: array_like, default=x
        x values used to determine the MAE; pass a subset to evaluate
        only a section of the data
    data_y: array_like, default=y
        y values used to determine the MAE

def get_mae(self, data_x=None, data_y=None):
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    mae_y = self.bestfit_func(data_x)
    return np.mean(abs(mae_y - data_y))
1,144,416
Bind and return a connection instance from url.

Arguments:
    - url (str): xbahn connection url

def listen(url, prefix=None, **kwargs):
    return listener(url, prefix=get_prefix(prefix), **kwargs)
1,144,478
Connect and return a connection instance from url.

Arguments:
    - url (str): xbahn connection url

def connect(url, prefix=None, **kwargs):
    return connection(url, prefix=get_prefix(prefix), **kwargs)
1,144,479
Executes the command given specific arguments as an input.

Args:
    correlation_id: a unique correlation/transaction id
    args: command arguments

Returns:
    an execution result.

Raises:
    ApplicationException: when execution fails for whatever reason.

def execute(self, correlation_id, args):
    # Validate arguments
    if self._schema is not None:
        self.validate_and_throw_exception(correlation_id, args)

    # Call the function
    try:
        return self._function(correlation_id, args)
    # Intercept unhandled errors
    except Exception as ex:
        raise InvocationException(
            correlation_id,
            "EXEC_FAILED",
            "Execution " + self._name + " failed: " + str(ex)
        ).with_details("command", self._name).wrap(ex)
1,144,520
Generates a property class from the definition dictionary.

Args:
    prop_defs: the dictionary defining the property
    prop_name: the base name of the property
    cls_names: the names of the rdf classes with which the property is
        associated
    hierarchy: the rdf class hierarchy

def make_property(prop_defs, prop_name, cls_names=[], hierarchy=[]):
    register = False
    try:
        cls_names.remove('RdfClassBase')
    except ValueError:
        pass
    if cls_names:
        new_name = "%s_%s" % (prop_name.pyuri, "_".join(cls_names))
        prop_defs['kds_appliesToClass'] = cls_names
    elif not cls_names:
        cls_names = [Uri('kdr_AllClasses')]
        register = True
        new_name = prop_name
    else:
        new_name = prop_name
    new_prop = types.new_class(new_name,
                               (RdfPropertyBase, list,),
                               {'metaclass': RdfPropertyMeta,
                                'prop_defs': prop_defs,
                                'class_names': cls_names,
                                'prop_name': prop_name,
                                'hierarchy': hierarchy})
    if register:
        global properties
        global domain_props
        properties[new_name] = new_prop
        for domain in new_prop.rdfs_domain:
            try:
                domain_props[domain][prop_name] = prop_defs
            except KeyError:
                domain_props[domain] = {}
                domain_props[domain][prop_name] = prop_defs
            except TypeError:
                pass
    return new_prop
1,144,558
Generates a property class linked to the rdf class.

Args:
    prop: unlinked property class
    cls_object: the rdf class with which the property is associated

def link_property(prop, cls_object):
    register = False
    cls_name = cls_object.__name__
    if cls_name and cls_name != 'RdfBaseClass':
        new_name = "%s_%s" % (prop._prop_name, cls_name)
    else:
        new_name = prop._prop_name
    new_prop = types.new_class(new_name,
                               (prop,),
                               {'metaclass': RdfLinkedPropertyMeta,
                                'cls_name': cls_name,
                                'prop_name': prop._prop_name,
                                'linked_cls': cls_object})
    return new_prop
1,144,559
Reads through the prop_defs and returns a dictionary filtered by the
current class.

Args:
    prop_defs: the definitions from the rdf vocabulary definition
    hierarchy: the class hierarchy of the current class
    cls_names: the names of the classes

def filter_prop_defs(prop_defs, hierarchy, cls_names):

    def _is_valid(test_list, valid_list):
        for test in test_list:
            if test in valid_list:
                return True
        return False

    new_dict = {}
    valid_classes = [Uri('kdr_AllClasses')] + cls_names + hierarchy
    for def_name, value in prop_defs.items():
        new_dict[def_name] = []
        empty_def = []
        try:
            for item in value:
                if item.get('kds_appliesToClass'):
                    if _is_valid(item['kds_appliesToClass'],
                                 valid_classes):
                        new_dict[def_name].append(item)
                else:
                    empty_def.append(item)
            if not new_dict[def_name]:
                new_dict[def_name] = empty_def
        except AttributeError:
            new_dict[def_name] = value
    return new_dict
1,144,561
Examines and adds any missing defs to the prop_defs dictionary for use
with the RdfPropertyMeta.__prepare__ method.

Args:
    prop_defs: the definitions from the rdf vocabulary definition
    prop_name: the property name
    cls_names: the names of the associated classes

Returns:
    prop_defs

def prepare_prop_defs(prop_defs, prop_name, cls_names):

    def get_def(prop_defs, def_fields, default_val=None):
        rtn_list = []
        for fld in def_fields:
            if prop_defs.get(fld):
                rtn_list += prop_defs.get(fld)
        if not rtn_list and default_val:
            rtn_list.append(default_val)
        elif rtn_list:
            try:
                rtn_list = list(set(rtn_list))
            except TypeError as e:
                # This deals with a domain that required a conjunction
                # of two rdf_Classes
                new_rtn = []
                for item in rtn_list:
                    if isinstance(item, MODULE.rdfclass.RdfClassBase):
                        new_rtn.append(
                            "|".join(merge_rdf_list(item['owl_unionOf'])))
                    elif isinstance(item, list):
                        new_rtn.append("|".join(item))
                    else:
                        new_rtn.append(item)
                rtn_list = list(set(new_rtn))
                new_rtn = []
                for item in rtn_list:
                    if "|" in item:
                        new_rtn.append([Uri(domain)
                                        for domain in item.split("|")])
                    else:
                        new_rtn.append(Uri(item))
                rtn_list = new_rtn
        return rtn_list

    required_def_defaults = {
        Uri('kds_rangeDef'): [{}],
        Uri('rdfs_range'): [Uri("xsd_string")],
        Uri('rdfs_domain'): cls_names,
        Uri('rdfs_label'): [NSM.nouri(prop_name)],
        Uri('kds_formDefault'): [{
            Uri('kds:appliesToClass'): Uri('kdr:AllClasses'),
            Uri('kds:formFieldName'): "emailaddr",
            Uri('kds:formLabelName'): [NSM.nouri(prop_name)],
            Uri('kds:formFieldHelp'): find_values(DESCRIPTION_FIELDS,
                                                  prop_defs,
                                                  None),
            Uri('kds:fieldType'): {
                Uri('rdf:type'): Uri('kdr:TextField')
            }
        }],
        Uri('kds_propertyValidation'): [],
        Uri('kds_propertySecurity'): [],
        Uri('kds_propertyProcessing'): []
    }
    for prop in required_def_defaults:
        if prop not in prop_defs.keys():
            prop_defs[prop] = required_def_defaults[prop]
    prop_defs['rdfs_domain'] = get_def(prop_defs, DOMAIN_FIELDS,
                                       cls_names)
    prop_defs['rdfs_range'] = get_def(prop_defs, RANGE_FIELDS,
                                      Uri('xsd_string'))
    return prop_defs
1,144,562
reads through the prop attributes and filters them for the associated class and returns a dictionary for meta_class __prepare__ args: prop: class object to read cls_name: the name of the class to tie the porperty to
def tie_prop_to_class(prop, cls_name):
    # The original body referenced an undefined kwargs/name/bases; here the
    # definitions are recovered from the property class itself (assumes Uri
    # attribute names are str-derived and prop carries _prop_name).
    attr_list = [attr for attr in dir(prop) if isinstance(attr, Uri)]
    prop_defs = {attr: getattr(prop, attr) for attr in attr_list}
    prop_name = prop._prop_name
    if cls_name == 'RdfClassBase':
        return {}
    doc_string = make_doc_string(prop_name, prop_defs, prop.__bases__, None)
    new_def = prepare_prop_defs(prop_defs, prop_name, cls_name)
    new_def['__doc__'] = doc_string
    new_def['_cls_name'] = cls_name
    new_def['_prop_name'] = prop_name
    # rdf_type is set-like: only unique values may be appended.
    if prop_name == 'rdf_type':
        new_def['append'] = unique_append
    new_def['_init_processors'] = get_processors('kds_initProcessor',
                                                 prop_defs)
    new_def['_es_processors'] = get_processors('kds_esProcessor',
                                               prop_defs,
                                               'es_values')
    return new_def
1,144,563
Reads the prop defs and adds applicable processors for the property

Args:
    processor_cat(str): the category of processors to retrieve
    prop_defs: property definitions as defined by the rdf definitions
    data_attr: the attr to manipulate during processing

Returns:
    list: a list of processors
def get_processors(processor_cat, prop_defs, data_attr=None):
    processor_defs = prop_defs.get(processor_cat, [])
    processor_list = []
    for processor in processor_defs:
        # Look up the processor class registered under the definition's
        # rdf_type and instantiate it with its parameters.
        proc_class = PropertyProcessor[processor['rdf_type'][0]]
        processor_list.append(proc_class(processor.get('kds_params', [{}]),
                                         data_attr))
    return processor_list
1,144,565
takes an rdf list and merges it into a python list args: rdf_list: the RdfDataset object with the list values returns: list of values
def merge_rdf_list(rdf_list):
    if isinstance(rdf_list, list):
        rdf_list = rdf_list[0]
    rtn_list = []
    item = rdf_list
    if item.get('rdf_rest') and item.get('rdf_rest', [1])[0] != 'rdf_nil':
        rtn_list += merge_rdf_list(item['rdf_rest'][0])
    if item.get('rdf_first'):
        rtn_list += item['rdf_first']
    rtn_list.reverse()
    return rtn_list
1,144,566
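As a quick illustration with hypothetical parsed data, a two-element RDF collection flattens like so:

# (ex_a ex_b) encoded as rdf_first/rdf_rest cons cells, ending in 'rdf_nil'.
rdf_list = [{
    'rdf_first': ['ex_a'],
    'rdf_rest': [{'rdf_first': ['ex_b'], 'rdf_rest': ['rdf_nil']}],
}]
merge_rdf_list(rdf_list)  # -> ['ex_a', 'ex_b']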
Get a named profile from the CONFIG_FILE. Args: name The name of the profile to load. Returns: A dictionary with the profile's ``repo`` and ``token`` values.
def read_profile(name): config = configparser.ConfigParser() config.read(CONFIG_FILE) profile = config[name] repo = profile["repo"] token = profile["token"] return {"repo": repo, "token": token}
1,144,748
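For example, given a CONFIG_FILE with these (hypothetical) contents:

#   [staging]
#   repo = owner/project
#   token = s3cr3t

read_profile("staging")  # -> {"repo": "owner/project", "token": "s3cr3t"}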
Inits a Skill class with proxy request and response. Args: app_id: str, default None. Skill application ID, declare to validate against application ID in the request.
def __init__(self, app_id=None): self.valid = Valid(app_id) self.request = RequestBody() self.response = ResponseBody() self.logic = dict() self.launch = self.register('LaunchRequest') self.intent = self.register self.session_ended = self.register('SessionEndedRequest')
1,144,896
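A hedged usage sketch, assuming register returns a decorator that files the handler under the given request or intent name (the register method itself is not shown here; the app id is hypothetical):

skill = Skill(app_id="amzn1.ask.skill.example")

@skill.launch
def on_launch():
    ...  # build the welcome response here

@skill.intent("HelloIntent")
def on_hello():
    ...  # handle the intent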
Creates an instance of a timing object that calculates elapsed time and stores it in the specified performance counters component under the specified name.

Args:
    counter: the name of the counter under which to record the elapsed time interval.
    callback: a performance counters component that stores the calculated value.
def __init__(self, counter = None, callback = None):
    self._counter = counter
    self._callback = callback
    # time.clock() was removed in Python 3.8; perf_counter() is the closest
    # modern equivalent for measuring elapsed intervals (kept in ms here).
    self._start = time.perf_counter() * 1000
1,145,002
Fetches all messages at @conn from @directory.

Params:
    conn: IMAP4_SSL connection
    directory: the IMAP directory to search
    readonly: readonly mode, True or False

Returns:
    List of (subject, body) tuples
def fetch_all_messages(self, conn, directory, readonly): conn.select(directory, readonly) message_data = [] typ, data = conn.search(None, 'All') # Loop through each message object for num in data[0].split(): typ, data = conn.fetch(num, '(RFC822)') for response_part in data: if isinstance(response_part, tuple): email_parser = email.parser.BytesFeedParser() email_parser.feed(response_part[1]) msg = email_parser.close() body = self.get_body(msg) subject = self.get_subject(msg) message_data.append((subject, body)) return message_data
1,145,027
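A usage sketch with a hypothetical host and credentials; since fetch_all_messages is a method, it is called here on some reader instance:

import imaplib

conn = imaplib.IMAP4_SSL("imap.example.com")
conn.login("user@example.com", "app-password")
for subject, body in reader.fetch_all_messages(conn, "INBOX", readonly=True):
    print(subject)
conn.logout()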
Create a repr of a property-based class quickly

Args:
    obj -- instance of the class
    *attrs -- list of attrs to add to the representation
    **kwargs -- extra arguments to add that are not captured as attributes

Returns:
    A string representing the class
from itertools import chain


def rep(obj, *attrs, **kwargs):
    s = obj.__class__.__name__
    args = chain(((attr, getattr(obj, attr)) for attr in attrs),
                 kwargs.items())
    s += '(%s)' % ','.join('{}={!r}'.format(k, v) for k, v in args)
    return s
1,145,095
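For instance, a class can delegate its __repr__ to rep:

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    def __repr__(self):
        return rep(self, 'x', 'y')

repr(Point(1, 2))  # 'Point(x=1,y=2)'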
Not a decorator, but a helper function to retrieve the cached item for a key created via get_cache_key.

Args:
    - cache_key: if there was a specific cache key used to cache the function, it should be provided here; if not, this should be None
    - func: the function which was cached
    - *func_args: arguments of the function
    - **func_kwargs: keyword arguments of the function
def get_cached_item(cache_key, func, *func_args, **func_kwargs):
    key = get_cache_key(cache_key, func, *func_args, **func_kwargs)
    return cache.get(key)
1,145,123
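A hedged usage sketch (expensive_report is hypothetical; get_cache_key and the cache object come from the same module as the caching decorator):

# Retrieve whatever was cached for expensive_report(2024), using the
# same key derivation the caching decorator used.
cached = get_cached_item(None, expensive_report, 2024)
if cached is None:
    ...  # nothing cached (or the entry expired)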
Converts a string to Duration.

Args:
    value: A string to be converted. The string must end with 's'. Any fractional digits (or none) are accepted as long as they fit into the precision. For example: "1s", "1.01s", "1.0000001s", "-3.100s".

Raises:
    ParseError: On parsing problems.
def FromJsonString(self, value):
    if len(value) < 1 or value[-1] != 's':
        raise ParseError(
            'Duration must end with letter "s": {0}.'.format(value))
    try:
        pos = value.find('.')
        if pos == -1:
            self.seconds = int(value[:-1])
            self.nanos = 0
        else:
            self.seconds = int(value[:pos])
            if value[0] == '-':
                self.nanos = int(round(
                    float('-0{0}'.format(value[pos:-1])) * 1e9))
            else:
                self.nanos = int(round(
                    float('0{0}'.format(value[pos:-1])) * 1e9))
    except ValueError:
        raise ParseError(
            'Couldn\'t parse duration: {0}.'.format(value))
1,145,202
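Worked examples of the parsing, assuming a protobuf-style Duration with seconds and nanos fields:

d = Duration()
d.FromJsonString("1.5s")     # d.seconds == 1,  d.nanos == 500000000
d.FromJsonString("-3.100s")  # d.seconds == -3, d.nanos == -100000000
d.FromJsonString("3")        # raises ParseError: must end with "s"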
Replace my :attr:`scopes` for the duration of the with block. My global scope is not replaced. Args: new_scopes (list of dict-likes): The new :attr:`scopes` to use.
def scopes_as(self, new_scopes): old_scopes, self.scopes = self.scopes, new_scopes yield self.scopes = old_scopes
1,145,444
Add a new innermost scope for the duration of the with block. Args: new_scope (dict-like): The scope to add.
def new_scope(self, new_scope=None):
    # Avoid a shared mutable default: assignments made inside the with
    # block land in the innermost scope, so each call needs a fresh dict.
    if new_scope is None:
        new_scope = {}
    old_scopes, self.scopes = self.scopes, self.scopes.new_child(new_scope)
    yield
    self.scopes = old_scopes
1,145,445
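Assuming these generator methods are wrapped with contextlib.contextmanager on an environment object env (the decorators are not shown in these snippets), usage would look like:

with env.new_scope({'x': val}):
    ...  # lookups and assignments see the temporary innermost scope
# the previous scope chain is restored on exit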
Add a new value to me. Args: val (LispVal): The value to be added. Returns: LispVal: The added value. Raises: ~parthial.errs.LimitationError: If I already contain the maximum number of elements.
def new(self, val): if len(self.things) >= self.max_things: raise LimitationError('too many things') self.things.add(val) return val
1,145,446
Recursively add a new value and its children to me. Args: val (LispVal): The value to be added. Returns: LispVal: The added value.
def rec_new(self, val): if val not in self.things: for child in val.children(): self.rec_new(child) self.new(val) return val
1,145,447
Recursively add a new value and its children to me, and assign a variable to it. Args: k (str): The name of the variable to assign. val (LispVal): The value to be added and assigned. Returns: LispVal: The added value.
def add_rec_new(self, k, val): self.rec_new(val) self[k] = val return val
1,145,448
Look up a variable. Args: k (str): The name of the variable to look up. Returns: LispVal: The value assigned to the variable. Raises: KeyError: If the variable has not been assigned to.
def __getitem__(self, k): chain = ChainMap(self.scopes, self.globals) return chain.__getitem__(k)
1,145,450
Check whether a variable has been assigned to. This is **not** the same kind of element-of as described in the class documentation. Args: k (str): The name of the variable to check. Returns: bool: Whether or not the variable has been assigned to.
def __contains__(self, k): chain = ChainMap(self.scopes, self.globals) return chain.__contains__(k)
1,145,451
:meth:`eval` an expression in a new, temporary :class:`Context`. This should be safe to use directly on user input. Args: expr (LispVal): The expression to evaluate. *args: Args for the :class:`Context` constructor. **kwargs: Kwargs for the :class:`Context` constructor.
def eval_in_new(cls, expr, *args, **kwargs): ctx = cls(*args, **kwargs) ctx.env.rec_new(expr) return ctx.eval(expr)
1,145,454
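A hedged usage sketch; user_expr and global_env are hypothetical, and any extra arguments are simply forwarded to the Context constructor:

# Evaluate an untrusted expression in a throwaway Context; the expression
# is registered with the new environment before evaluation.
result = Context.eval_in_new(user_expr, global_env)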