Dataset columns (viewer statistics condensed): docstring — string, 52 to 499 characters; function — string, 67 to 35.2k characters; __index_level_0__ — int64, 52.6k to 1.16M.
Returns an integer that has the value of the decimal string: dec_str * 10^decimals Arguments: dec_str (string): represents a decimal number decimals (int): number of decimals for creating the integer output Returns: (int) Raises: ValueError if dec_str is not a valid decimal string TypeError if decimals is not an integer Note: values may be truncated (not rounded).
def decstr2int(dec_str, decimals):
    if not isinstance(decimals, int):
        raise TypeError('decimals must be an integer')
    try:
        dollars, cents = dec_str.split('.')
    except ValueError:
        if '.' not in dec_str:
            dollars = dec_str
            cents = '0'
        else:
            raise ValueError('Invalid decimal string')
    else:
        if len(cents) < decimals:
            cents = cents.ljust(decimals, '0')
        elif decimals < 1:
            cents = '0'
        elif len(cents) > decimals:
            cents = cents[:decimals]
    try:
        cents = int(cents)
    except:
        cents = 0
    try:
        return int(int(dollars) * (10 ** decimals)) + cents
    except:
        raise ValueError('Invalid decimal string')
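A quick usage sketch of the truncation behavior described above (hypothetical inputs, not from the source):

assert decstr2int("12.345", 2) == 1234  # '345' is truncated to '34', not rounded
assert decstr2int("12", 2) == 1200      # no fractional part: cents default to 0
assert decstr2int("12.5", 0) == 12      # decimals=0 drops the fraction entirely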
1,131,989
Deposits specified neopoints into the user's account, returns result Parameters: amount (int) -- Amount of neopoints to deposit Returns bool - True if successful, False otherwise Raises notEnoughNps
def deposit(self, amount):
    pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
    if self.usr.nps < int(amount):
        raise notEnoughNps
    form = pg.form(action="process_bank.phtml")
    form.update({'type': 'deposit', 'amount': str(amount)})
    form.usePin = True
    pg = form.submit()
    # Success redirects to bank page
    if "It's great to see you again" in pg.content:
        self.__loadDetails(pg)
        return True
    else:
        logging.getLogger("neolib.user").info(
            "Failed to deposit NPs for unknown reason. User NPs: " +
            str(self.usr.nps) + ". Amount: " + str(amount), {'pg': pg})
        return False
1,132,062
Withdraws specified neopoints from the user's account, returns result Parameters: amount (int) -- Amount of neopoints to withdraw Returns bool - True if successful, False otherwise Raises notEnoughBalance
def withdraw(self, amount):
    pg = self.usr.getPage("http://www.neopets.com/bank.phtml")
    try:
        results = pg.find(text="Account Type:").parent.parent.parent \
            .find_all("td", align="center")
        self.balance = results[1].text.replace(" NP", "")
    except Exception:
        logging.getLogger("neolib.user").exception(
            "Could not parse user's bank balance.", {'pg': pg})
    if int(amount) > int(self.balance.replace(",", "")):
        raise notEnoughBalance
    form = pg.form(action="process_bank.phtml")
    form.update({'type': 'withdraw', 'amount': str(amount)})
    form.usePin = True
    pg = form.submit()
    # Success redirects to bank page
    if "It's great to see you again" in pg.content:
        self.__loadDetails(pg)
        return True
    else:
        logging.getLogger("neolib.user").info(
            "Failed to withdraw NPs for unknown reason. User NPs: " +
            str(self.usr.nps) + ". Amount: " + str(amount), {'pg': pg})
        return False
1,132,063
Returns the specified hook. Args: hook_name (str) Returns: str - (the content of) the hook Raises: HookNotFoundError
def get_hook(hook_name):
    if not pkg_resources.resource_exists(__name__, hook_name):
        raise HookNotFoundError
    return pkg_resources.resource_string(__name__, hook_name)
1,132,110
Returns the ICachableItem whose key matches id. Args: id: String that identifies the item to return.
def getById(self, Id):
    # we need to create a new object to ensure we don't corrupt the
    # generator count
    csvsource = CSVSource(self.source, self.factory, self.key())
    try:
        for item in csvsource.items():
            if Id == item.getId():
                return item
    except StopIteration:
        return None
1,132,420
Wrap a widget to conform with Bootstrap's html control design. Args: input_class: Class to give to the rendered <input> control. add_meta (bool): Whether to wrap the rendered control with the meta wrapper. form_group (bool): Whether to wrap the rendered control in a form group.
def bootstrap_styled(cls=None, add_meta=True, form_group=True,
                     input_class='form-control'):
    def real_decorator(cls):
        class NewClass(cls):
            pass
        NewClass.__name__ = cls.__name__
        NewClass = custom_widget_wrapper(NewClass)
        _call = NewClass.__call__
        def call(*args, **kwargs):
            if input_class:
                kwargs.setdefault('class', input_class)
            return _call(*args, **kwargs)
        if add_meta:
            call = meta_wrapped(call)
        if form_group:
            call = form_group_wrapped(call)
        NewClass.__call__ = call
        return NewClass
    if cls:
        # Allow calling decorator(cls) instead of decorator()(cls)
        rv = real_decorator(cls)
        return rv
    return real_decorator
1,132,567
Walk on rule content tokens to return a dict of properties. This is pretty naive and will choke/fail on everything more evolved than simple ``ident(string):value(string)``. Arguments: rule (tinycss2.ast.QualifiedRule): Qualified rule object as returned by tinycss2. Returns: dict: Dictionary of retrieved variables and properties.
def digest_content(self, rule):
    data = OrderedDict()
    current_key = None
    for token in rule.content:
        # Assume first identity token is the property name
        if token.type == 'ident':
            # Ignore starting '-' from css variables
            name = token.value
            if name.startswith('-'):
                name = name[1:]
            current_key = name
            data[current_key] = None
        # Assume first following string token is the property value.
        if token.type == 'string':
            data[current_key] = token.value
    return data
1,132,986
Parse source and consume tokens from tinycss2. Arguments: source (string): Source content to parse. Returns: dict: Retrieved rules.
def consume(self, source):
    manifest = OrderedDict()
    rules = parse_stylesheet(
        source,
        skip_comments=True,
        skip_whitespace=True,
    )
    for rule in rules:
        # Gather rule selector+properties
        name = self.digest_prelude(rule)
        # Ignore everything out of styleguide namespace
        if not name.startswith(RULE_BASE_PREFIX):
            continue
        properties = self.digest_content(rule)
        manifest[name] = properties
    return manifest
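For orientation, a minimal sketch of how consume() and digest_content() fit together. The ".styleguide" prefix and the `parser` instance are assumptions; RULE_BASE_PREFIX and digest_prelude() are defined outside this excerpt:

css = '.styleguide-palette { color: "#c0ffee"; spacing: "1rem"; }'
manifest = parser.consume(css)
# -> OrderedDict([('.styleguide-palette',
#                  OrderedDict([('color', '#c0ffee'), ('spacing', '1rem')]))])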
1,132,987
Try to guess how many pages are in the book listing. Args: dom (obj): HTMLElement container of the page with book list. Returns: int: Number of pages for given category.
def _get_max_page(dom):
    div = dom.find("div", {"class": "razeniKnihListovani"})
    if not div:
        return 1
    # isolate only page numbers from links
    links = div[0].find("a")
    max_page = filter(
        lambda x: "href" in x.params and "pageindex=" in x.params["href"],
        links
    )
    max_page = map(
        lambda x: x.params["href"].split("pageindex=")[-1],
        max_page
    )
    max_page = filter(lambda x: x.isdigit(), max_page)
    max_page = map(lambda x: int(x), max_page)
    if not max_page:
        return 1
    return max(max_page)
1,133,118
Parse links to details about publications from the page with the book list. Args: dom (obj): HTMLElement container of the page with book list. Returns: list: List of strings / absolute links to book details.
def _parse_book_links(dom):
    links = []
    picker = lambda x: x.params.get("class", "").startswith("boxProKnihy")
    for el in dom.find(None, fn=picker):
        book_ref = el.find("a")
        if not book_ref or "href" not in book_ref[0].params:
            continue
        links.append(book_ref[0].params["href"])
    return links
1,133,119
Go through `links` to categories and return a list of links to all publications in all given categories. Args: links (list): List of strings (absolute links to categories). Returns: list: List of strings / absolute links to book details.
def get_book_links(links):
    book_links = []
    for link in links:
        data = DOWNER.download(link + "1")
        dom = dhtmlparser.parseString(data)
        book_links.extend(_parse_book_links(dom))
        max_page = _get_max_page(dom)
        if max_page == 1:
            continue
        for i in range(max_page - 1):
            data = DOWNER.download(link + str(i + 2))
            book_links.extend(
                _parse_book_links(
                    dhtmlparser.parseString(data)
                )
            )
    return book_links
1,133,120
Parse information about the authors of the book. Args: dom (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`.Author` objects. Blank if no author found.
def _parse_authors(authors):
    link = authors.find("a")
    link = link[0].params.get("href") if link else None
    author_list = _strip_content(authors)
    if "(" in author_list:
        author_list = author_list.split("(")[0]
    if not author_list.strip():
        return []
    return map(
        lambda author: Author(author.strip(), link),
        author_list.strip().split(",")
    )
1,133,121
Download and parse available information about the book from the publisher's webpages. Args: link (str): URL of the book at the publisher's webpages. Returns: obj: :class:`.Publication` instance with book details.
def _process_book(link):
    # download and parse book info
    data = DOWNER.download(link)
    dom = dhtmlparser.parseString(
        utils.handle_encodnig(data)  # (sic: upstream helper's actual name)
    )
    dhtmlparser.makeDoubleLinked(dom)
    # some books are without price in expected elements, this will try to
    # get it from elsewhere
    price = None
    try:
        price = _strip_content(zapi.get_price(dom))
    except UserWarning:
        price = dom.find("p", {"class": "vaseCena"})
        if price:
            price = price[0].getContent().replace("&nbsp;", " ")
            price = filter(lambda x: x.isdigit(), price.strip())
            if price:
                price = price[0] + "kč"
            else:
                price = "-1"
        else:
            price = "-1"
    # required information
    pub = Publication(
        title=_strip_content(zapi.get_title(dom)),
        authors=_parse_authors(zapi.get_author(dom)),
        price=price,
        publisher=_strip_content(zapi.get_publisher(dom))
    )
    # optional information
    pub.optionals.URL = link
    pub.optionals.pages = _strip_content(zapi.get_pages(dom))
    pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))
    pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))
    pub.optionals.binding = _strip_content(zapi.get_binding(dom))
    # post checks
    if pub.title.startswith("E-kniha:"):
        pub.title = pub.title.replace("E-kniha:", "", 1).strip()
        pub.optionals.is_ebook = True
    if pub.optionals.ISBN:
        if " " in pub.optionals.ISBN:
            pub.optionals.ISBN = pub.optionals.ISBN.split(" ")[0]
        if "(" in pub.optionals.ISBN:
            pub.optionals.ISBN = pub.optionals.ISBN.split("(")[0]
    return pub
1,133,122
Serialize this keymap as a JSON formatted stream to *fp*. Arguments: fp: A ``.write()``-supporting file-like object to write the generated JSON to (default is ``sys.stdout``). **kwargs: Options to be passed into :func:`json.dumps`.
def dump(self, fp=sys.stdout, **kwargs):
    fp.write(FILE_HEADER)
    fp.write(self.to_json(**kwargs))
    fp.write('\n')
1,133,512
Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self
def extend(self, *bindings):
    self._bindings.extend(self._preprocess(bindings))
    return self
1,133,513
Bind the keys to the specified *command* with some *args*. Arguments: command (str): Name of the ST command (e.g. ``insert_snippet``). **args: Arguments for the command. Returns: Binding: self
def to(self, command, **args):
    self.command = command
    self.args = args
    return self
1,133,518
Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context: The newly added context object.
def when(self, key):
    ctx = Context(key, self)
    self.context.append(ctx)
    return ctx
1,133,519
Initialize the object. Args: conf_path (str): See :attr:`conf_path`. project_key (str, default None): See :attr:`project_key`. If not set, the root of the database is used (this may cause performance issues). run_asyncore_thread (bool, default True): Run external asyncore thread, which handles connections to database? Default True.
def __init__(self, conf_path, project_key=None, run_asyncore_thread=True):
    self.conf_path = conf_path
    super(ZEOConfWrapper, self).__init__(
        project_key=project_key,
        run_asyncore_thread=run_asyncore_thread,
    )
1,133,568
Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory" Returns: If the file exists, return the filename or directory. Raises: If the file does not exist, raise a parser error.
def extant_item(arg, arg_type):
    if arg_type == "file":
        if not os.path.isfile(arg):
            raise argparse.ArgumentError(
                None,
                "The file {arg} does not exist.".format(arg=arg))
        else:
            # File exists so return the filename
            return arg
    elif arg_type == "directory":
        if not os.path.isdir(arg):
            raise argparse.ArgumentError(
                None,
                "The directory {arg} does not exist.".format(arg=arg))
        else:
            # Directory exists so return the directory name
            return arg
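The parsers below reference `extant_file` and `extant_dir`, which are not defined in this excerpt; a plausible reading, shown here only as a sketch, is that they are partial applications of extant_item:

import functools
extant_file = functools.partial(extant_item, arg_type="file")
extant_dir = functools.partial(extant_item, arg_type="directory")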
1,133,588
Parse the args using the config_file, input_dir, output_dir pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
def parse_config_input_output(args=sys.argv):
    parser = argparse.ArgumentParser(
        description='Process the input files using the given config')
    parser.add_argument(
        'config_file',
        help='Configuration file.',
        metavar='FILE',
        type=extant_file)
    parser.add_argument(
        'input_dir',
        help='Directory containing the input files.',
        metavar='DIR',
        type=extant_dir)
    parser.add_argument(
        'output_dir',
        help='Directory where the output files should be saved.',
        metavar='DIR',
        type=extant_dir)
    return parser.parse_args(args[1:])
1,133,589
Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD
def parse_config(args=sys.argv):
    parser = argparse.ArgumentParser(
        description='Read in the config file')
    parser.add_argument(
        'config_file',
        help='Configuration file.',
        metavar='FILE',
        type=extant_file)
    return parser.parse_args(args[1:])
1,133,590
Returns current ICachedItem for ICachableItem Args: CachableItem: ICachableItem, used as a reference to find a cached version Returns: ICachedItem or None, if CachableItem has not been cached
def get(self, CachableItem):
    return self.session \
        .query(self.mapper.factory().__class__) \
        .filter(self.mapper.factory().__class__
                .__dict__[self.mapper.key()] == CachableItem.getId()) \
        .first()
1,133,607
Formats a datatype value to a SPARQL representation args: item: the datatype object dt_format: the return format ['turtle', 'uri']
def format_sparql(item, dt_format='turtle', **kwargs):
    try:
        # test that the value is JSON serializable, then use it as-is
        rtn_val = json.dumps(item.value)
        rtn_val = item.value
    except:
        if 'time' in item.class_type.lower() \
                or 'date' in item.class_type.lower():
            rtn_val = item.value.isoformat()
        else:
            rtn_val = str(item.value)
    if hasattr(item, "datatype"):
        if hasattr(item, "lang") and item.lang:
            rtn_val = '%s@%s' % (json.dumps(rtn_val), item.lang)
        else:
            dt = item.datatype
            if dt_format == "uri":
                dt = item.datatype.sparql_uri
            if item.datatype in ["xsd_string", "xsd_dateTime",
                                 "xsd_time", "xsd_date"]:
                rtn_val = json.dumps(rtn_val)
            else:
                rtn_val = '"%s"' % json.dumps(rtn_val)
            rtn_val = '%s^^%s' % (rtn_val, dt.sparql)
    return rtn_val
1,133,615
Formats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjunction with the 'sparql' method
def _format(self, method="sparql", dt_format="turtle"):
    try:
        return __FORMAT_OPTIONS__[method](self, dt_format=dt_format)
    except KeyError:
        raise NotImplementedError("'{}' is not a valid format method"
                                  "".format(method))
1,133,616
cycles through the instantiated namespaces to see if it has already been created args: namespace: tuple of prefix and uri to check
def __is_new_ns__(cls, namespace):
    for ns in cls._ns_instances:
        if ns[0] == namespace[0] and ns[1] == namespace[1]:
            return ns
        elif ns[0] == namespace[0] and ns[1] != namespace[1]:
            raise NsPrefixExistsError(namespace,
                                      ns,
                                      "prefix [%s] already assigned to [%s]"
                                      % (namespace, ns[1]))
        elif ns[0] != namespace[0] and ns[1] == namespace[1]:
            raise NsUriExistsError(namespace,
                                   ns,
                                   "uri [%s] already assigned to [%s]"
                                   % (namespace, ns[0]))
    return True
1,133,620
Extends the function to add an attribute to the class for each added namespace to allow for use of dot notation. All prefixes are converted to lowercase Args: prefix: string of namespace name namespace: rdflib.namespace instance kwargs: calc: whether or not create the lookup reference dictionaries Example usage: RdfNsManager.rdf.type => http://www.w3.org/1999/02/22-rdf-syntax-ns#type
def bind(self, prefix, namespace, *args, **kwargs):
    setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs))
    if kwargs.pop('calc', True):
        self.__make_dicts__
1,133,628
Reads the beginning of a turtle file, registers the prefixes used in that file, and sets the prefix attribute Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary
def load(self, filepath, file_encoding=None):
    with open(filepath, encoding=file_encoding) as inf:
        for line in inf:
            current_line = str(line).strip()
            if current_line.startswith("@prefix"):
                self._add_ttl_ns(current_line.replace("\n", ""))
            elif len(current_line) > 10:
                break
    self.__make_dicts__
1,133,630
Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri
def dict_load(self, ns_dict):
    for prefix, uri in ns_dict.items():
        self.bind(prefix, uri, override=False, calc=False)
    self.__make_dicts__
1,133,631
takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string
def _add_ttl_ns(self, line):
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)
    lg.debug("line:\n%s", line)
    line = str(line).strip()
    # if the line is not a prefix line exit
    if line is None or line == 'none' or line == '' \
            or not line.lower().startswith('@prefix'):
        return
    # parse the turtle line
    line = line.replace("@prefix", "", 1).strip()
    if line.endswith("."):
        line = line[:-1]
    prefix = line[:line.find(":")].strip()
    uri = self.clean_iri(line[line.find(":") + 1:].strip())
    # add the namespace to the class
    lg.debug("\nprefix: %s uri: %s", prefix, uri)
    self.bind(prefix, uri, override=False, calc=False)
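A sketch of the prefix line this method parses, assuming clean_iri() strips the angle brackets (`nsm` stands in for a manager instance):

nsm._add_ttl_ns('@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .')
# binds the prefix 'rdfs' to 'http://www.w3.org/2000/01/rdf-schema#'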
1,133,632
Removes a namespace reference from the manager. args: namespace: prefix, string, or Namespace() to remove
def del_ns(self, namespace):
    # remove the item from the namespace dict
    namespace = str(namespace)
    attr_name = None
    if hasattr(self, namespace):
        delattr(self, namespace)
1,133,633
Converts a py_uri or ttl uri to the full http://... uri format Args: value: the string to convert Returns: full uri of an abbreviated uri
def uri(self, value, strip_iri=True):
    return self.convert_to_uri(value, strip_iri=strip_iri)
1,133,634
converts a prefixed rdf ns equivalent value to its uri form. If not found, returns the value as is args: value: the URI/IRI to convert strip_iri: removes the < and > signs
def convert_to_uri(self, value, strip_iri=True):
    parsed = self.parse_uri(str(value))
    try:
        new_uri = "%s%s" % (self.ns_dict[parsed[0]], parsed[1])
        if not strip_iri:
            return self.iri(new_uri)
        return new_uri
    except KeyError:
        return self.rpyhttp(value)
1,133,635
takes a value and returns a tuple of the parts args: value: a uri in any form: pyuri, ttl or full IRI
def get_uri_parts(self, value):
    if value.startswith('pyuri_'):
        value = self.rpyhttp(value)
    parts = self.parse_uri(value)
    try:
        return (self.ns_dict[parts[0]], parts[1])
    except KeyError:
        try:
            return (self.ns_dict[parts[0].lower()], parts[1])
        except KeyError:
            return ((None, parts[0]), parts[1])
1,133,636
converts a string to an IRI or returns an IRI if already formatted Args: uri_string: uri in string format Returns: formatted uri with <>
def iri(uri_string):
    uri_string = str(uri_string)
    if uri_string[:1] == "?":
        return uri_string
    if uri_string[:1] == "[":
        return uri_string
    if uri_string[:1] != "<":
        uri_string = "<{}".format(uri_string.strip())
    if uri_string[len(uri_string) - 1:] != ">":
        uri_string = "{}>".format(uri_string.strip())
    return uri_string
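Illustrative round-trips (hypothetical inputs): SPARQL variables and already-wrapped IRIs pass through untouched, everything else gets wrapped:

assert iri("http://example.org/x") == "<http://example.org/x>"
assert iri("<http://example.org/x>") == "<http://example.org/x>"
assert iri("?subject") == "?subject"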
1,133,639
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is. args: value: the value to convert
def convert_to_ttl(self, value):
    parsed = self.parse_uri(value)
    try:
        rtn_val = "%s:%s" % (self.uri_dict[parsed[0]], parsed[1])
    except KeyError:
        rtn_val = self.iri(self.rpyhttp(value))
    return rtn_val
1,133,640
converts a value to the prefixed rdf ns equivalent. If not found returns the value as is args: value: the value to convert
def convert_to_ns(self, value):
    parsed = self.parse_uri(value)
    try:
        rtn_val = "%s_%s" % (self.uri_dict[parsed[0]], parsed[1])
    except KeyError:
        rtn_val = self.pyhttp(value)
    return rtn_val
1,133,641
Parses a value into a head and tail pair based on finding the last '#' or '/', as is standard with URI formats args: value: string value to parse returns: tuple: (lookup, end)
def parse_uri(value):
    value = RdfNsManager.clean_iri(value)
    lookup = None
    end = None
    try:
        lookup = value[:value.rindex('#') + 1]
        end = value[value.rindex('#') + 1:]
    except ValueError:
        try:
            lookup = value[:value.rindex('/') + 1]
            end = value[value.rindex('/') + 1:]
        except ValueError:
            try:
                lookup = value[:value.index(':')]
                end = value[value.rindex(':') + 1:]
            except ValueError:
                try:
                    lookup = value[:value.index('_')]
                    end = value[value.index('_') + 1:]
                except ValueError:
                    lookup = value
                    end = ""
    return (lookup, end)
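The fallback order is '#', then '/', then ':', then '_'. Hypothetical inputs, assuming RdfNsManager.clean_iri() returns these plain strings unchanged:

assert parse_uri("http://example.org/ns#label") == ("http://example.org/ns#", "label")
assert parse_uri("rdfs:label") == ("rdfs", "label")
assert parse_uri("rdfs_label") == ("rdfs", "label")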
1,133,642
method to send a notification to a user Parameters: to -> recipient msg -> message to send label -> application description title -> name of the notification event uri -> callback uri
def send_notification(self, to=None, msg=None, label=None, title=None,
                      uri=None):
    url = self.root_url + "send_notification"
    values = {}
    if to is not None:
        values["to"] = to
    if msg is not None:
        values["msg"] = msg
    if label is not None:
        values["label"] = label
    if title is not None:
        values["title"] = title
    if uri is not None:
        values["uri"] = uri
    return self._query(url, values)
1,133,683
method to send a message to a user Parameters: to -> recipient msg -> message to send
def send_message(self, to=None, msg=None):
    url = self.root_url + "send_message"
    values = {}
    if to is not None:
        values["to"] = to
    if msg is not None:
        values["msg"] = msg
    return self._query(url, values)
1,133,684
query method to do HTTP POST/GET Parameters: url -> the url to POST/GET data -> header_data as a dict (only for POST) Returns: Parsed JSON data as dict or None on error
def _query(self, url, data=None):
    # NOTE: Python 2 code (urllib2, encodestring, `except IOError, e`)
    auth = encodestring('%s:%s' % (self.user, self.secret)).replace('\n', '')
    if data is not None:
        # we have POST data if there is data
        values = urllib.urlencode(data)
        request = urllib2.Request(url, values)
        request.add_header("Authorization", "Basic %s" % auth)
    else:
        # do a GET otherwise
        request = urllib2.Request(url)
        request.add_header("Authorization", "Basic %s" % auth)
    try:
        response = urllib2.urlopen(request)
    except IOError, e:
        # no connection
        return {"status": "error",
                "response_code": e.code,
                "response_message": e.msg}
    return json.loads(response.read())
1,133,685
queries the database and returns the status of the item. args: status_item: the name of the item to check
def get(self, status_item):
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)
    sparql = """..."""  # SPARQL template elided in the source; it is
                        # formatted with the group and status item below
    value = self.conn.query(sparql=sparql.format(self.group, status_item))
    if len(value) > 0 and \
            cbool(value[0].get('loaded', {}).get("value", False)):
        return True
    else:
        return False
1,133,782
sets the status item to the passed-in parameters args: status_item: the name of the item to set status: boolean value to set the item
def set(self, status_item, status):
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)
    sparql = """..."""  # SPARQL update template elided in the source
    return self.conn.query(sparql=sparql.format(self.group,
                                                status_item,
                                                str(status).lower()),
                           mode='update')
1,133,783
Iterates through a list of queries and runs them through the connection Args: ----- queries: list of strings or tuples containing (query_string, kwargs) conn: the triplestore connection to use
def run_query_series(queries, conn):
    results = []
    for item in queries:
        qry = item
        kwargs = {}
        if isinstance(item, tuple):
            qry = item[0]
            kwargs = item[1]
        result = conn.update_query(qry, **kwargs)
        results.append(result)
    return results
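An illustrative call mixing bare query strings with (query, kwargs) tuples; the queries and `conn` object are hypothetical:

queries = [
    "DROP GRAPH <http://example.org/g1>;",
    ("LOAD <file:///data.ttl> INTO GRAPH <http://example.org/g1>;", {}),
]
results = run_query_series(queries, conn)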
1,133,949
Returns all the triples for a specific graph args: graph: the URI of the graph to retrieve conn: the rdfframework triplestore connection
def get_graph(graph, conn, **kwargs):
    sparql = render_without_request("sparqlGraphDataTemplate.rq",
                                    prefix=NSM.prefix(),
                                    graph=graph)
    return conn.query(sparql, **kwargs)
1,133,951
Make the filter section for a query template args: filters: list of dictionaries to generate the filter example: filters = [{'variable': 'p', 'operator': '=', 'union_type': '||', 'values': ['rdf:type', 'rdfs:label']}]
def make_sparql_filter(filters):
    def make_filter_str(variable, operator, union_type, values):
        formated_vals = UniqueList()
        for val in values:
            try:
                formated_vals.append(val.sparql)
            except AttributeError:
                formated_vals.append(val)
        pre_str = "?%s%s" % (variable.replace("?", ""), operator)
        union = "%s\n\t\t" % union_type
        return "\tFilter( %s) .\n" % union.join(
            [pre_str + val for val in formated_vals])
    if not filters:
        return ""
    rtn_str = ""
    for param in filters:
        rtn_str += make_filter_str(**param)
    return rtn_str
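Working through the docstring's own example by hand, the single filter spec renders to one Filter clause (plain string values take the AttributeError branch):

filters = [{'variable': 'p', 'operator': '=', 'union_type': '||',
            'values': ['rdf:type', 'rdfs:label']}]
make_sparql_filter(filters)
# -> '\tFilter( ?p=rdf:type||\n\t\t?p=rdfs:label) .\n'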
1,133,952
Detects and opens compressed files Args: parser (ArgumentParser): parser used to generate values namespace (Namespace): namespace to set values for value (str): actual value specified by user option_string (str): argument flag used to call this function **kwargs (various): optional arguments later passed to the compression algorithm
def __call__(self, parser, namespace, value, option_string=None, **kwargs):
    handle = copen(value, mode=self.mode, **self.kwargs)
    setattr(namespace, self.dest, handle)
1,134,013
Converts an ill-conditioned correlation matrix into a well-conditioned matrix with one common correlation coefficient Parameters: ----------- R : ndarray an ill-conditioned correlation matrix, e.g. oxyba.illcond_corrmat Return: ------- cmat : ndarray DxD matrix with +1 as diagonal elements and 1 common coefficient for all other relations.
def onepara(R):
    import numpy as np
    import warnings
    d = R.shape[0]
    if d < 2:
        raise Exception((
            "More than one variable is required. "
            "Supply at least a 2x2 matrix."))
    # the explicit solution: the mean of the off-diagonal elements
    # (the flattened source had `np.sum(R) + np.trace(R)`, which would
    # reject even the identity matrix; the diagonal must be subtracted)
    x = (np.sum(R) - np.trace(R)) / (d**2 - d)
    if x < (-1. / (d - 1)) or x > 1:
        warnings.warn("No analytic solution found x={:.8f}".format(x))
        return None
    else:
        C = np.eye(d)
        C[np.logical_not(C)] = x
        return C
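A minimal sketch of the intended behavior, using the mean-off-diagonal solution above (hypothetical matrix):

import numpy as np
R = np.array([[1.0, 0.3, 0.7],
              [0.3, 1.0, 0.5],
              [0.7, 0.5, 1.0]])
C = onepara(R)
# off-diagonal mean = (0.3 + 0.7 + 0.5) * 2 / 6 = 0.5, so C is:
# [[1.0, 0.5, 0.5], [0.5, 1.0, 0.5], [0.5, 0.5, 1.0]]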
1,134,317
Takes a list of Item objects and returns a list of Item objects with respective prices modified Uses the given list of item objects to formulate a query to the item database. Uses the returned results to populate each item in the list with its respective price, then returns the modified list. Parameters: items (list[Item]) -- List of items to price Returns list[Item] - Priced list of items
def priceItems(items):
    # NOTE: the flattened source indexed an empty `retItems` list by item
    # name, which cannot work; a name -> Item map restores the intent of
    # matching priced results back to their items.
    item_map = {item.name: item for item in items}
    resp = CodexAPI.searchMany(list(item_map.keys()))
    for respItem in resp:
        item_map[respItem['name']].price = respItem['price']
    return list(item_map.values())
1,134,477
Own "dumb" reimplementation of textwrap.wrap(). This is because calling .wrap() on bigger strings can take a LOT of processor power. And I mean like 8 seconds of 3GHz CPU just to wrap 20kB of text without spaces. Args: text (str): Text to wrap. columns (int): Wrap after `columns` characters. Returns: str: Wrapped text.
def _wrap(text, columns=80):
    out = []
    for cnt, char in enumerate(text):
        out.append(char)
        if (cnt + 1) % columns == 0:
            out.append("\n")
    return "".join(out)
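Hypothetical example of the hard wrap every `columns` characters:

print(_wrap("a" * 25, columns=10))
# aaaaaaaaaa
# aaaaaaaaaa
# aaaaa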
1,134,481
checks to see if the server_core is running args: timeout: number of seconds to wait before giving up start_delay: seconds to delay before the first check; the check cycles until the core is up or the timeout is reached
def verify_server_core(timeout=120, start_delay=90):
    timestamp = time.time()
    last_check = time.time() + start_delay - 10
    last_delay_notification = time.time() - 10
    server_down = True
    return_val = False
    timeout += 1
    # loop until the server is up or the timeout is reached
    while ((time.time() - timestamp) < timeout) and server_down:
        # if delaying the start of the check, print waiting to start
        if start_delay > 0 and time.time() - timestamp < start_delay \
                and (time.time() - last_delay_notification) > 5:
            print("Delaying server status check until %ss. Current time: %ss"
                  % (start_delay, int(time.time() - timestamp)))
            last_delay_notification = time.time()
        # send a request check every 10s until the server is up
        while ((time.time() - last_check) > 10) and server_down:
            print("Checking status of servers at %ss" %
                  int((time.time() - timestamp)))
            last_check = time.time()
            try:
                repo = requests.get(CFG.REPOSITORY_URL)
                repo_code = repo.status_code
                print("\t", CFG.REPOSITORY_URL, " - ", repo_code)
            except:
                repo_code = 400
                print("\t", CFG.REPOSITORY_URL, " - DOWN")
            try:
                triple = requests.get(CFG.DATA_TRIPLESTORE.url)
                triple_code = triple.status_code
                print("\t", CFG.DATA_TRIPLESTORE.url, " - ", triple_code)
            except:
                triple_code = 400
                print("\t", CFG.DATA_TRIPLESTORE.url, " - down")
            if repo_code == 200 and triple_code == 200:
                server_down = False
                return_val = True
                print("**** Servers up at %ss" %
                      int((time.time() - timestamp)))
                break
    return return_val
1,134,540
loads an rml mapping into memory args: rml_name(str): the name of the rml file
def load_rml(self, rml_name):
    conn = CFG.rml_tstore
    cache_path = os.path.join(CFG.CACHE_DATA_PATH, 'rml_files', rml_name)
    if not os.path.exists(cache_path):
        results = get_graph(NSM.uri(getattr(NSM.kdr, rml_name), False),
                            conn)
        with open(cache_path, "w") as file_obj:
            file_obj.write(json.dumps(results, indent=4))
    else:
        results = json.loads(open(cache_path).read())
    self.rml[rml_name] = RdfDataset(results)
    return self.rml[rml_name]
1,134,543
loads the RDF/turtle application data to the triplestore args: reset(bool): True will delete the definition dataset and reload all of the datafiles.
def _load_data(self, reset=False):
    log = logging.getLogger("%s.%s" % (self.log_name,
                                       inspect.stack()[0][3]))
    log.setLevel(self.log_level)
    for attr, obj in self.datafile_obj.items():
        if reset or obj['latest_mod'] > obj['last_json_mod']:
            conn = obj['conn']
            sparql = "DROP ALL;"
            if os.path.isdir(obj['cache_path']):
                shutil.rmtree(obj['cache_path'], ignore_errors=True)
            os.makedirs(obj['cache_path'])
            drop_extensions = conn.update_query(sparql)
            rdf_resource_templates = []
            rdf_data = []
            for path, files in obj['files'].items():
                for file in files:
                    file_path = os.path.join(path, file)
                    result = conn.load_data(file_path,
                                            graph=str(getattr(NSM.kdr,
                                                              file)),
                                            is_file=True)
                    if result.status_code > 399:
                        # the flattened source referenced an undefined
                        # `file_name`; `file` is the loaded filename
                        raise ValueError("Cannot load '{}' into {}".format(
                            file, conn))
1,134,546
Define a subcommand. Args: *args (str): Sequence of program arguments needed to run the command. directory (Optional[str]): Directory the command is run in. env_vars (Optional[dict]): Environment variables to feed to the subcommand.
def __init__(self, *args, **kwargs):
    self.command = list(args)
    self.directory = kwargs['directory'] if 'directory' in kwargs else None
    self.env_vars = kwargs['env_vars'] if 'env_vars' in kwargs else None
1,134,671
Get subcommand acting on a service. Subcommand will run in service directory and with the environment variables used to run the service itself. Args: *args: Arguments to run command (e.g. "redis-cli", "-n", "1") Returns: Subcommand object.
def subcommand(self, *args):
    return Subcommand(*args,
                      directory=self.directory,
                      env_vars=self.env_vars)
1,134,677
Forecasts how long a backlog will take to complete given the historical values provided. Arguments: throughputs(List[int]): Number of units completed per unit of time (stories per week, story points per month, etc.) backlog_size(int): Units in the backlog (stories, points, etc.) Returns: results Exceptions: ValueError: If there aren't any positive throughputs, or the simulation takes too long.
def forecast(self, throughputs, backlog_size, num_simulations=10000,
             max_periods=10000, seed=None):
    self._check_throughputs(throughputs)
    results = []
    if seed is not None:
        random.seed(seed)
    for i in range(0, num_simulations):
        simulated_backlog = backlog_size
        time_unit_count = 0
        while simulated_backlog > 0:
            simulated_backlog -= random.choice(throughputs)
            time_unit_count += 1
            if time_unit_count > max_periods:
                raise ValueError(
                    "More than {} periods calculated".format(max_periods))
        results.append(time_unit_count)
    return Results(results)
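An illustrative call; the `forecaster` instance and throughput numbers are hypothetical, and Results simply wraps the simulated period counts:

results = forecaster.forecast(
    throughputs=[3, 5, 2, 4],  # units completed per week
    backlog_size=30,
    num_simulations=1000,
    seed=42,                   # fixed seed for reproducible runs
)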
1,134,796
Parse title from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title.
def _parse_alt_title(html_chunk):
    title = html_chunk.find(
        "input",
        {"src": "../images_buttons/objednat_off.gif"}
    )
    assert title, "Can't find alternative title!"
    title = title[0]
    assert "title" in title.params, "Can't find alternative title source!"
    # title is stored as `Bleh bleh: Title`
    title = title.params["title"].split(":", 1)[-1]
    return title.strip()
1,134,825
Parse title/name of the book and URL of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (title, url), both as strings.
def _parse_title_url(html_chunk):
    title = html_chunk.find("div", {"class": "comment"})
    if not title:
        return _parse_alt_title(html_chunk), None
    title = title[0].find("h2")
    if not title:
        return _parse_alt_title(html_chunk), None
    # look for the url of the book if present
    url = None
    url_tag = title[0].find("a")
    if url_tag:
        url = url_tag[0].params.get("href", None)
        title = url_tag
    return title[0].getContent(), normalize_url(BASE_URL, url)
1,134,826
Parse authors of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author \ found.
def _parse_authors(html_chunk):
    authors = html_chunk.match(
        ["div", {"class": "comment"}],
        "h3",
        "a",
    )
    if not authors:
        return []
    authors = map(
        lambda x: Author(  # create Author objects
            x.getContent().strip(),
            normalize_url(BASE_URL, x.params.get("href", None))
        ),
        authors
    )
    return filter(lambda x: x.name.strip(), authors)
1,134,827
Parse format, number of pages and ISBN. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: tuple: (format, pages, isbn), all as string.
def _parse_format_pages_isbn(html_chunk):
    ppi = get_first_content(
        html_chunk.find("div", {"class": "price-overflow"})
    )
    if not ppi:
        return None, None, None
    # all information this function should parse is on one line
    ppi = filter(lambda x: x.strip(), ppi.split("<br />"))[0]
    # parse isbn
    isbn = dhtmlparser.parseString(ppi)
    isbn = isbn.find("b")
    isbn = isbn[0].getContent() if isbn else None
    # parse pages and format
    pages = None
    book_format = None
    details = ppi.split("|")
    if len(details) >= 2:
        book_format = details[0].strip()
        pages = details[1].strip()
    return book_format, pages, isbn
1,134,828
Parse price of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str/None: Price as string with currency or None if not found.
def _parse_price(html_chunk):
    price = get_first_content(
        html_chunk.find("div", {"class": "prices"})
    )
    if not price:
        return None
    # it is always in format `Cena:\n150kč`
    price = dhtmlparser.removeTags(price)
    price = price.split("\n")[-1]
    return price
1,134,829
Parse available information about the book from the book details page. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: obj: :class:`structures.Publication` instance with book details.
def _process_book(html_chunk):
    title, url = _parse_title_url(html_chunk)
    book_format, pages, isbn = _parse_format_pages_isbn(html_chunk)
    # required information
    pub = Publication(
        title=title,
        authors=_parse_authors(html_chunk),
        price=_parse_price(html_chunk),
        publisher="Grada"
    )
    # optional information
    pub.optionals.URL = url
    pub.optionals.ISBN = isbn
    pub.optionals.pages = pages
    pub.optionals.format = book_format
    pub.optionals.sub_title = _parse_subtitle(html_chunk)
    pub.optionals.description = _parse_description(html_chunk)
    return pub
1,134,830
The external landing. Also a convenience function for redirecting users who don't have site access to the external page. Parameters: request - the request in the calling function message - a message from the caller function
def red_ext(request, message=None):
    if message:
        messages.add_message(request, messages.ERROR, message)
    return HttpResponseRedirect(reverse('external'))
1,134,918
Convenience function for redirecting users who don't have access to a page to the home page. Parameters: request - the request in the calling function message - a message from the caller function
def red_home(request, message=None):
    if message:
        messages.add_message(request, messages.ERROR, message)
    return HttpResponseRedirect(reverse('homepage'))
1,134,919
Adds a configuration file. Args: file (string): path to a .json or .ini file Returns: type: None Raises: FileFormatException: raised when the file format is not supported
def addFile(self, file):
    # recursively upper-case all dict keys
    mylambda = lambda adict: {
        key.upper(): mylambda(adict[key]) if isinstance(adict[key], dict)
        else adict[key]
        for key in adict.keys()
    }
    if file.endswith('.json'):
        with open(file, 'r') as f:
            fileContent = mylambda(json.load(f))
    elif file.endswith('.ini'):
        parser = configparser.ConfigParser()
        parser.read(file)
        fileContent = {
            section: {conflist[0].upper(): conflist[1]
                      for conflist in parser.items(section)}
            for section in parser.sections()
        }
    else:
        raise FileFormatException()
    self._config = {**self._config, **mylambda(fileContent)}
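The recursive key upper-casing can be seen in isolation (hypothetical input):

upper_keys = lambda adict: {
    key.upper(): upper_keys(adict[key]) if isinstance(adict[key], dict)
    else adict[key]
    for key in adict.keys()
}
upper_keys({"db": {"host": "localhost"}})  # -> {'DB': {'HOST': 'localhost'}}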
1,135,203
Retrieves a config value. Args: path (String): name of a config entry Returns: type: String the value of the config entry, or None
def get(self, path):
    path = path.upper()
    if path in self._configCache:
        return self._configCache[path]
    else:
        return self._findConfig(path)
1,135,204
converts the results of a query to RdfDatatype instances args: data: a list of triples
def convert_results(data, **kwargs):
    if kwargs.get("multiprocessing", False):
        manager = SharedManager()
        manager.register("BaseRdfDataType", BaseRdfDataType)
        manager.register("Uri", Uri)
        data_l = len(data)
        group_size = data_l // pool_size
        if data_l % pool_size:
            group_size += 1
        split_data = [data[i:i + group_size]
                      for i in range(0, data_l, group_size)]
        output = manager.Queue()
        workers = [mp.Process(target=convert_batch, args=(item, output,))
                   for item in split_data]
        for worker in workers:
            worker.start()
        results = []
        while True:
            running = any(p.is_alive() for p in workers)
            while not output.empty():
                results += output.get()
            if not running:
                break
        print("Finished - workers not stopped")
        for worker in workers:
            worker.join()
        for i in range(output.qsize()):
            results += output.get()
        return results
    else:
        return [{key: pyrdf(value) for key, value in row.items()}
                for row in data]
1,135,333
Update the parser object for the shell. Arguments: parser: An instance of argparse.ArgumentParser.
def update_parser(parser):
    def __stdin(s):
        if s is None:
            return None
        if s == '-':
            return sys.stdin
        return open(s, 'r', encoding='utf8')
    parser.add_argument('--root-prompt', metavar='STR',
                        default='PlayBoy',
                        help='the root prompt string')
    parser.add_argument('--temp-dir', metavar='DIR',
                        default='/tmp/easyshell_demo',
                        help='the directory to save history files')
    parser.add_argument('--debug', action='store_true',
                        help='turn debug information on')
    parser.add_argument('file', metavar='FILE', nargs='?', type=__stdin,
                        help="execute script in non-interactive mode. "
                             "'-' = stdin")
1,135,672
When called, get the environment updates and write updates to a CSV file and if a new config has been provided, write a new configuration file. Args: display_all_distributions (bool): Return distribution even if it is up-to-date. verbose (bool): If ``True``, log to the terminal.
def get_updates(self, display_all_distributions=False,
                verbose=False):  # pragma: no cover
    if verbose:
        logging.basicConfig(
            stream=sys.stdout,
            level=logging.INFO,
            format='%(message)s',
        )
    logging.info('Checking installed packages for updates...')
    updates = self._get_environment_updates(
        display_all_distributions=display_all_distributions
    )
    if updates:
        for update in updates:
            logging.info(update)
    if updates and self._csv_file_name:
        self.write_updates_to_csv(updates)
    if updates and self._new_config:
        self.write_new_config(updates)
    return updates
1,135,775
Given a list of updates, write the updates out to the provided CSV file. Args: updates (list): List of Update objects.
def write_updates_to_csv(self, updates):
    with open(self._csv_file_name, 'w') as csvfile:
        csvwriter = self.csv_writer(csvfile)
        csvwriter.writerow(CSV_COLUMN_HEADERS)
        for update in updates:
            row = [
                update.name,
                update.current_version,
                update.new_version,
                update.prelease,
            ]
            csvwriter.writerow(row)
1,135,777
Given a list of updates, write the updates out to the provided configuration file. Args: updates (list): List of Update objects.
def write_new_config(self, updates):
    with open(self._new_config, 'w') as config_file:
        for update in updates:
            line = '{0}=={1} # The installed version is: {2}\n'.format(
                update.name,
                update.new_version,
                update.current_version
            )
            config_file.write(line)
1,135,778
Check all packages installed in the environment to see if there are any updates available. Args: display_all_distributions (bool): Return distribution even if it is up-to-date. Defaults to ``False``. Returns: list: A list of Update objects ordered based on ``instance.name``.
def _get_environment_updates(self, display_all_distributions=False):
    updates = []
    for distribution in self.pip.get_installed_distributions():
        versions = self.get_available_versions(distribution.project_name)
        max_version = max(versions.keys()) if versions else UNKNOW_NUM
        update = None
        distribution_version = self._parse_version(distribution.version)
        if versions and max_version > distribution_version:
            update = Update(
                distribution.project_name,
                distribution.version,
                versions[max_version],
                prelease=max_version[-1]
            )
        elif (
            display_all_distributions
            and max_version == distribution_version
        ):
            update = Update(
                distribution.project_name,
                distribution.version,
                versions[max_version],
            )
        elif display_all_distributions:
            update = Update(
                distribution.project_name,
                distribution.version,
                UNKNOWN
            )
        if update:
            updates.append(update)
    return sorted(updates, key=lambda x: x.name)
1,135,779
Query PyPI to see if package has any available versions. Args: project_name (str): The name the project on PyPI. Returns: dict: Where keys are tuples of parsed versions and values are the versions returned by PyPI.
def get_available_versions(self, project_name):
    available_versions = self.pypi_client.package_releases(project_name)
    if not available_versions:
        available_versions = self.pypi_client.package_releases(
            project_name.capitalize()
        )
    # ``dict()`` for Python 2.6 syntax.
    return dict(
        (self._parse_version(version), version)
        for version in available_versions
    )
1,135,780
Parse a version string. Args: version (str): A string representing a version e.g. '1.9rc2' Returns: tuple: major, minor, patch parts cast as integer and whether or not it was a pre-release version.
def _parse_version(version):
    parsed_version = parse_version(version)
    return tuple(
        int(dot_version)
        for dot_version in parsed_version.base_version.split('.')
    ) + (parsed_version.is_prerelease,)
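Examples, assuming setuptools/packaging semantics for parse_version (base_version plus the is_prerelease flag):

assert _parse_version('1.9rc2') == (1, 9, True)
assert _parse_version('2.0.1') == (2, 0, 1, False)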
1,135,781
Parses a json query string into its parts args: qry_str: query string params: variables passed into the string
def parse_json_qry(qry_str):
    def param_analyzer(param_list):
        rtn_list = []
        for param in param_list:
            parts = param.strip().split("=")
            try:
                rtn_list.append(
                    JsonQryProcessor[parts[0].strip().lower()](parts[1]))
            except IndexError:
                rtn_list.append(
                    JsonQryProcessor[parts[0].strip().lower()]())
        return rtn_list
    def part_analyzer(part, idx):
        nonlocal dallor, asterick, question_mark
        if part == "$":
            dallor = idx
            return part
        elif part == "*":
            asterick = idx
            return part
        elif part == "?":
            question_mark = idx
            return part
        elif part.startswith("="):
            return part
        return cssparse(part)[0]
    main_parts = qry_str.split("|")
    or_parts = main_parts.pop(0).strip()
    params = param_analyzer(main_parts)
    rtn_list = []
    for or_part in [item.strip() for item in or_parts.split(",")
                    if item.strip()]:
        dallor, asterick, question_mark = None, None, None
        dot_parts = or_part.split(".")
        rtn_list.append(([part_analyzer(part, i)
                          for i, part in enumerate(dot_parts)],
                         dallor, asterick, question_mark))
    return {"qry_parts": rtn_list, "params": params}
1,136,016
reads the parameter and returns the selected element args: dataset: the dataset to search param: the parameter to search by no_key: whether to use the 'param' 'element' to filter the list. This is passed True after the first run during the recursive call, when the key has already been used to select a subset of the dataset
def get_json_qry_item(dataset, param, no_key=False):
    def get_dataset_vals(ds, key, filter_tup=tuple()):
        def reduce_list(value):
            if isinstance(value, list):
                if len(value) == 1:
                    return value[0]
            return value
        def merge_list(value):
            if isinstance(value, list):
                rtn_list = []
                for item in value:
                    if isinstance(item, list):
                        rtn_list += item
                    else:
                        rtn_list.append(item)
                try:
                    return list(set(rtn_list))
                except TypeError:
                    return rtn_list
            return value
        def test_elem(elem, filter_tup):
            search_lst = elem
            if isinstance(elem, dict):
                search_lst = elem.get(filter_tup[0], [])
            if filter_tup[2] == '=':
                try:
                    if elem.subject == filter_tup[1]:
                        return True
                except AttributeError:
                    pass
                test_lst = [item for item in search_lst
                            if (isinstance(item, dict)
                                and item.subject == filter_tup[1])
                            or item == filter_tup[1]]
                if test_lst:
                    return True
            return False
        def filter_list(ds, key, filter_tup):
            rtn_list = ds
            if key:
                rtn_list = merge_list(
                    [reduce_list(reduce_list(elem)[key])
                     for elem in ds
                     if isinstance(reduce_list(elem), dict)
                     and reduce_list(elem).get(key)])
            if filter_tup:
                return [elem for elem in rtn_list
                        if test_elem(elem, filter_tup)]
            return rtn_list
        if isinstance(ds, list):
            return filter_list(ds, key, filter_tup)
        elif isinstance(ds, dict):
            search_dict = ds
            if key:
                search_dict = ds.get(key, [])
            if filter_tup:
                datalist = []
                for elem in search_dict:
                    if filter_tup[2] == "=":
                        if filter_tup[1] in elem.get(filter_tup[0], []):
                            if isinstance(elem, list):
                                datalist += elem
                            else:
                                datalist.append(elem)
                    elif filter_tup[2] == "!=":
                        if filter_tup[1] not in elem.get(filter_tup[0], []):
                            datalist.append(elem)
                return datalist
            return merge_list(search_dict)
    if param == "*":
        return dataset
    try:
        if param.startswith("="):
            # if the dataset length is '0' consider it a false match
            if dataset:
                return [pyrdf(param[1:])]
            return []
    except AttributeError:
        pass
    if hasattr(param, 'parsed_tree'):
        param = param.parsed_tree
    if hasattr(param, 'selector'):
        if no_key:
            key = None
        else:
            key = get_element(param.selector)
        rtn_obj = None
        if hasattr(param, 'ident'):
            if key:
                rtn_obj = get_dataset_vals(dataset, key,
                                           ('rdf_type', param.ident, "="))
            elif param.ident in dataset.get('rdf_type', []):
                rtn_obj = dataset
            else:
                rtn_obj = [value for value in dataset.values()
                           if param.ident in value.get('rdf_type', [])]
        elif hasattr(param, 'attrib'):
            rtn_obj = get_dataset_vals(dataset, key,
                                       (param.attrib, param.value,
                                        param.operator))
        if rtn_obj is not None:
            if hasattr(param, 'selector') \
                    and hasattr(param.selector, 'selector') \
                    and rtn_obj:
                rtn_obj = get_json_qry_item(rtn_obj, param.selector, True)
            return rtn_obj
        if key:
            return dataset[key]
        else:
            return dataset
    elif hasattr(param, 'element'):
        key = param.element
        return get_dataset_vals(dataset, key)
1,136,017
reads the parameter and returns the selected element args: dataset: the dataset to search param: the parameter to search by no_key: whether to use the 'param' 'element' to filter the list. This is passed True after the first run during the recursive call, when the key has already been used to select a subset of the dataset
def get_reverse_json_qry_item(dataset, param, no_key=False,
                              initial_val=None):
    def get_dataset_vals(ds, key, filter_tup=tuple(), initial_val=None):
        def reduce_list(value):
            if isinstance(value, list):
                if len(value) == 1:
                    return value[0]
            return value
        def merge_list(value):
            if isinstance(value, list):
                rtn_list = []
                for item in value:
                    if isinstance(item, list):
                        rtn_list += item
                    else:
                        rtn_list.append(item)
                return list(set(rtn_list))
            return value
        def test_elem(elem, filter_tup):
            search_lst = elem
            if isinstance(elem, dict):
                search_lst = elem.get(filter_tup[0], [])
            if filter_tup[2] == '=':
                try:
                    if elem.subject == filter_tup[1]:
                        return True
                except AttributeError:
                    pass
                test_lst = [item for item in search_lst
                            if (isinstance(item, dict)
                                and item.subject == filter_tup[1])
                            or item == filter_tup[1]]
                if test_lst:
                    return True
            return False
        def filter_list(ds, key, filter_tup, initial_val=None):
            rtn_list = ds
            if key:
                rtn_list = merge_list(
                    [reduce_list(reduce_list(elem)[key])
                     for elem in ds
                     if reduce_list(elem).get(key)])
            if filter_tup:
                return [elem for elem in rtn_list
                        if test_elem(elem, filter_tup)]
            return rtn_list
        def reverse_filter_list(ds, key, filter_tup, initial_val=None):
            def get_reverse_ds_dict(ds, key, filter_tup, initial_val=None):
                if hasattr(ds, 'rmap') and initial_val:
                    if key:
                        try:
                            return ds.rmap[initial_val][key]
                        except KeyError:
                            return []
                    else:
                        return ds.rmap[initial_val]
                data_list = UniqueList()
                if not key and not initial_val:
                    return data_list
                for sub, ds_rdf_class in ds.items():
                    for pred, obj in ds_rdf_class.items():
                        if key and pred == key:
                            if initial_val:
                                if initial_val in obj:
                                    data_list.append(ds_rdf_class)
                            else:
                                data_list.append(ds_rdf_class)
                        if not key and initial_val:
                            if initial_val in obj:
                                data_list.append(ds_rdf_class)
                return data_list
            if isinstance(ds, dict):
                return get_reverse_ds_dict(ds, key, filter_tup, initial_val)
            elif isinstance(ds, list):
                rtn_list = UniqueList()
                for item in ds:
                    rtn_list += get_reverse_ds_dict(ds, key, filter_tup,
                                                    initial_val)
                return rtn_list
        if isinstance(ds, list):
            return reverse_filter_list(ds, key, filter_tup, initial_val)
        elif isinstance(ds, dict):
            search_dict = ds
            if key:
                search_dict = reverse_filter_list(ds, key, filter_tup,
                                                  initial_val)
            if filter_tup:
                datalist = []
                for elem in search_dict:
                    if filter_tup[2] == "=":
                        if filter_tup[1] in elem.get(filter_tup[0], []):
                            if isinstance(elem, list):
                                datalist += elem
                            else:
                                datalist.append(elem)
                    elif filter_tup[2] == "!=":
                        if filter_tup[1] not in elem.get(filter_tup[0], []):
                            datalist.append(elem)
                return datalist
            return merge_list(search_dict)
    if param == "*":
        pass
    if hasattr(param, 'parsed_tree'):
        param = param.parsed_tree
    if hasattr(param, 'selector'):
        if no_key:
            key = None
        else:
            key = get_element(param.selector)
        rtn_obj = None
        if hasattr(param, 'ident'):
            rtn_obj = get_dataset_vals(dataset, key,
                                       ('rdf_type', param.ident, "="))
        elif hasattr(param, 'attrib'):
            rtn_obj = get_dataset_vals(dataset, key,
                                       (param.attrib, param.value,
                                        param.operator))
        if rtn_obj is not None:
            if hasattr(param, 'selector') \
                    and hasattr(param.selector, 'selector') \
                    and rtn_obj:
                rtn_obj = get_json_qry_item(rtn_obj, param.selector, True)
            return rtn_obj
        return dataset.get(key, [])
    elif hasattr(param, 'element'):
        key = param.element
        return get_dataset_vals(dataset, key, initial_val=initial_val)
1,136,018
Takes a json query string and returns the results args: dataset: RdfDataset to query against qry_str: query string params: dictionary of params
def json_qry(dataset, qry_str, params={}):
    if '$' not in qry_str:
        qry_str = ".".join(['$', qry_str.strip()])
    dallor_val = params.get("$", dataset)
    if isinstance(dallor_val, rdflib.URIRef):
        dallor_val = Uri(dallor_val)
    if qry_str.strip() == '$':
        return [dallor_val]
    parsed_qry = parse_json_qry(qry_str)
    qry_parts = parsed_qry['qry_parts']
    post_actions = parsed_qry['params']
    rtn_list = UniqueList()
    if params.get('dataset'):
        dataset = params['dataset']
    for or_part in qry_parts:
        if or_part[1] == 0:
            if isinstance(dallor_val, dict):
                result = dallor_val
            else:
                try:
                    result = dataset[dallor_val]
                except KeyError:
                    try:
                        result = dataset[Uri(dallor_val)]
                    except KeyError:
                        try:
                            result = dataset[BlankNode(dallor_val)]
                        except KeyError:
                            continue
            forward = True
            for part in or_part[0][1:]:
                if part == "*":
                    forward = not forward
                else:
                    if forward:
                        result = get_json_qry_item(result, part)
                    else:
                        result = get_reverse_json_qry_item(result, part,
                                                           False)
        else:
            result = dataset
            parts = or_part[0].copy()
            parts.reverse()
            forward = False
            for part in parts[1:]:
                if part == "*":
                    forward = not forward
                else:
                    if forward:
                        result = get_json_qry_item(result, part)
                    else:
                        result = get_reverse_json_qry_item(result, part,
                                                           False,
                                                           dallor_val)
        rtn_list += result
    for action in post_actions:
        rtn_list = action(rtn_list)
    return rtn_list
1,136,019
Create equal sized slices of the file. The last slice may be larger than the others. args: * size (int): The full size to be sliced. * n (int): The number of slices to return. returns: A list of `FileSlice` objects of length (n).
def slices(self, size, n=3):
    if n <= 0:
        raise ValueError('n must be greater than 0')
    if size < n:
        raise ValueError('size argument cannot be less than n argument')
    slice_size = size // n
    last_slice_size = size - (n - 1) * slice_size
    t = [self(c, slice_size)
         for c in range(0, (n - 1) * slice_size, slice_size)]
    t.append(self((n - 1) * slice_size, last_slice_size))
    return t
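A hypothetical example (`factory` stands in for the instance providing slices()): 10 bytes in 3 slices yields offsets/sizes (0, 3), (3, 3), (6, 4), with the remainder folded into the last slice.

parts = factory.slices(10, n=3)
# equivalent FileSlice constructions: self(0, 3), self(3, 3), self(6, 4)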
1,136,335
Create a file slice. args: * start (int): The beginning position of the slice. The slice will read/write to this position when it is seeked to 0. * size (int): The size of the slice.
def __call__(self, start, size):
    return FileSlice(self.f, start, size, self.lock)
1,136,336
Gets a MatchObject for the given key. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
def _get_match(self, key):
    return self._get_string_match(key=key) or \
        self._get_non_string_match(key=key)
1,136,594
Gets a MatchObject for the given key, assuming a non-string value. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
def _get_non_string_match(self, key):
    expression = r'(?:\s*)'.join([
        '^',
        'define',
        r'\(',
        '\'{}\''.format(key),
        ',',
        r'(.*)',
        r'\)',
        ';'
    ])
    pattern = re.compile(expression, re.MULTILINE)
    return pattern.search(self._content)
1,136,595
Gets a MatchObject for the given key, assuming a string value. Args: key (str): Key of the property to look-up. Return: MatchObject: The discovered match.
def _get_string_match(self, key):
    expression = r'(?:\s*)'.join([
        '^',
        'define',
        r'\(',
        '\'{}\''.format(key),
        ',',
        r'\'(.*)\'',
        r'\)',
        ';'
    ])
    pattern = re.compile(expression, re.MULTILINE)
    return pattern.search(self._content)
1,136,596
Gets the value of the property in the given MatchObject. Args: key (str): Key of the property looked-up. match (MatchObject): The matched property. Return: The discovered value, as a string or boolean.
def _get_value_from_match(self, key, match):
    value = match.groups(1)[0]
    clean_value = str(value).lstrip().rstrip()
    if clean_value == 'true':
        self._log.info('Got value of "%s" as boolean true.', key)
        return True
    if clean_value == 'false':
        self._log.info('Got value of "%s" as boolean false.', key)
        return False
    try:
        float_value = float(clean_value)
        self._log.info('Got value of "%s" as float "%f".', key, float_value)
        return float_value
    except ValueError:
        self._log.info('Got value of "%s" as string "%s".', key,
                       clean_value)
        return clean_value
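A sketch of the coercions, assuming self._content holds wp-config.php-style defines (illustrative lines, not from the source):

#   define('WP_DEBUG', true);   -> get('WP_DEBUG') == True
#   define('TIMEOUT', 2.5);     -> get('TIMEOUT') == 2.5   (float() fallback)
#   define('DB_NAME', 'app');   -> get('DB_NAME') == 'app' (string)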
1,136,597
Gets the value of the property of the given key. Args: key (str): Key of the property to look-up.
def get(self, key):
    match = self._get_match(key=key)
    if not match:
        return None
    return self._get_value_from_match(key=key, match=match)
1,136,598
Updates the value of the given key in the loaded content. Args: key (str): Key of the property to update. value (str): New value of the property. Return: bool: Indicates whether or not a change was made.
def set(self, key, value):
    match = self._get_match(key=key)
    if not match:
        self._log.info('"%s" does not exist, so it will be added.', key)
        if isinstance(value, str):
            self._log.info('"%s" will be added as a PHP string value.', key)
            value_str = '\'{}\''.format(value)
        else:
            self._log.info('"%s" will be added as a PHP object value.', key)
            value_str = str(value).lower()
        new = 'define(\'{key}\', {value});'.format(key=key, value=value_str)
        self._log.info('"%s" will be added as: %s', key, new)
        replace_this = '<?php\n'
        replace_with = '<?php\n' + new + '\n'
        self._content = self._content.replace(replace_this, replace_with)
        self._log.info('Content string has been updated.')
        return True
    if self._get_value_from_match(key=key, match=match) == value:
        self._log.info('"%s" is already up-to-date.', key)
        return False
    self._log.info('"%s" exists and will be updated.', key)
    start_index = match.start(1)
    end_index = match.end(1)
    if isinstance(value, bool):
        value = str(value).lower()
        self._log.info('"%s" will be updated with boolean value: %s',
                       key, value)
    else:
        self._log.info('"%s" will be updated with string value: %s',
                       key, value)
    start = self._content[:start_index]
    end = self._content[end_index:]
    self._content = start + value + end
    return True
1,136,599
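Taken together, get() and set() act as a small key-value interface over the PHP source held in self._content. A hypothetical usage sketch; the enclosing class name (PhpConfig) and its constructor are assumptions, since neither appears in this excerpt:

config = PhpConfig("<?php\ndefine('WP_DEBUG', false);\n")  # hypothetical ctor

config.get('WP_DEBUG')        # -> False, coerced to a boolean
config.set('WP_DEBUG', True)  # rewrites the existing define(); returns True
config.set('WP_DEBUG', True)  # -> False: value already up-to-date
config.set('WP_CACHE', True)  # missing key: a new define() is inserted
                              # immediately after the opening '<?php' line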
Get a user object from the API. If no ``user_id`` or ``user_name`` is specified, it will return the User object for the currently authenticated user. Args: user_id (int): User ID of the user for whom you want to get information. [Optional] user_name(str): Username for the user for whom you want to get information. [Optional] Returns: A User object.
def get_user(self, user_id=None, user_name=None):
    if user_id:
        endpoint = '/api/user_id/{0}'.format(user_id)
    elif user_name:
        endpoint = '/api/user_name/{0}'.format(user_name)
    else:
        # Return currently authorized user.
        endpoint = '/api/user'
    data = self._make_request(verb="GET", endpoint=endpoint)
    try:
        return User.NewFromJSON(data)
    except Exception:
        # Fall back to the raw response if it cannot be parsed as a User.
        return data
1,136,734
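A hypothetical call sequence for get_user; the client object and its construction are assumptions (only the method itself is from the source), and the ID and username are placeholders:

client = Mlkshk(...)  # hypothetical client construction

me = client.get_user()                        # currently authenticated user
by_id = client.get_user(user_id=12345)        # placeholder ID
by_name = client.get_user(user_name='alice')  # placeholder username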
Returns a SharedFile object given by the sharekey. Args: sharekey (str): Sharekey of the SharedFile you want to retrieve. Returns: SharedFile
def get_shared_file(self, sharekey=None):
    if not sharekey:
        raise Exception("You must specify a sharekey.")
    endpoint = '/api/sharedfile/{0}'.format(sharekey)
    data = self._make_request('GET', endpoint)
    return SharedFile.NewFromJSON(data)
1,136,737
'Like' a SharedFile. mlkshk doesn't allow you to unlike a sharedfile, so this is ~~permanent~~. Args: sharekey (str): Sharekey for the file you want to 'like'. Returns: Either a SharedFile on success, or an exception on error.
def like_shared_file(self, sharekey=None):
    if not sharekey:
        raise Exception(
            "You must specify a sharekey of the file you "
            "want to 'like'.")
    endpoint = '/api/sharedfile/{sharekey}/like'.format(sharekey=sharekey)
    data = self._make_request("POST", endpoint=endpoint, data=None)
    try:
        sf = SharedFile.NewFromJSON(data)
        sf.liked = True
        return sf
    except Exception:
        raise Exception("{0}".format(data['error']))
1,136,738
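Note that like_shared_file sets the liked flag on the returned object client-side rather than re-fetching the file. Illustrative only; the client object and sharekey are placeholders:

sf = client.like_shared_file(sharekey='GA4')  # placeholder sharekey
print(sf.liked)  # -> True, set locally after a successful POST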
Save a SharedFile to your Shake. Args: sharekey (str): Sharekey for the file to save. Returns: SharedFile saved to your shake.
def save_shared_file(self, sharekey=None):
    endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)
    data = self._make_request("POST", endpoint=endpoint, data=None)
    try:
        sf = SharedFile.NewFromJSON(data)
        sf.saved = True
        return sf
    except Exception:
        raise Exception("{0}".format(data['error']))
1,136,739
Retrieve comments on a SharedFile Args: sharekey (str): Sharekey for the file from which you want to return the set of comments. Returns: List of Comment objects.
def get_comments(self, sharekey=None):
    if not sharekey:
        raise Exception(
            "You must specify a sharekey of the file for "
            "which you want to retrieve comments.")
    endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)
    data = self._make_request("GET", endpoint=endpoint)
    return [Comment.NewFromJSON(c) for c in data['comments']]
1,136,742
Post a comment on behalf of the current user to the SharedFile with the given sharekey. Args: sharekey (str): Sharekey of the SharedFile to which you'd like to post a comment. comment (str): Text of the comment to post. Returns: Comment object.
def post_comment(self, sharekey=None, comment=None):
    endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)
    post_data = {'body': comment}
    data = self._make_request("POST", endpoint=endpoint, data=post_data)
    return Comment.NewFromJSON(data)
1,136,743
Update the editable details (just the title and description) of a SharedFile. Args: sharekey (str): Sharekey of the SharedFile to update. title (Optional[str]): Title of the SharedFile. description (Optional[str]): Description of the SharedFile Returns: SharedFile on success, 404 on Sharekey not found, 403 on unauthorized.
def update_shared_file(self, sharekey=None, title=None, description=None):
    if not sharekey:
        raise Exception(
            "You must specify a sharekey for the sharedfile "
            "you wish to update.")
    if not (title or description):
        raise Exception("You must specify a title or description.")
    post_data = {}
    if title:
        post_data['title'] = title
    if description:
        post_data['description'] = description
    endpoint = '/api/sharedfile/{0}'.format(sharekey)
    data = self._make_request('POST', endpoint=endpoint, data=post_data)
    return SharedFile.NewFromJSON(data)
1,136,745
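update_shared_file validates before it sends anything: a missing sharekey, or an empty title-and-description pair, raises without a request. Illustrative usage with placeholder values:

# Update only the title; the description is left untouched.
sf = client.update_shared_file(sharekey='GA4', title='New title')

# Raises before any request is made:
# client.update_shared_file(sharekey='GA4')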
Returns the user shop that the indexed item is in Parameters: index (int) -- The item index Returns UserShopFront - The user shop the item is in
def shop(self, index):
    # Look up the indexed item first; the original referenced an
    # undefined 'item' (compare buy() below).
    item = self.items[index]
    return UserShopFront(self.usr, item.owner, item.id, str(item.price))
1,136,861
Attempts to buy indexed item, returns result Parameters: index (int) -- The item index Returns bool - True if item was bought, false otherwise
def buy(self, index):
    item = self.items[index]
    us = UserShopFront(self.usr, item.owner, item.id, str(item.price))
    us.load()
    if item.name not in us.inventory:
        return False
    if not us.inventory[item.name].buy():
        return False
    return True
1,136,862
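buy() re-loads the shop before purchasing, so it returns False if the item has sold out between indexing and buying. A hedged usage sketch; how the surrounding results object is obtained is not shown in this excerpt:

results = ...  # hypothetical: the object exposing items, shop(), and buy()
if results.buy(0):
    print('Bought ' + results.items[0].name)
else:
    print('Item was gone by the time the shop loaded')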
domain: seattle, bothell, tacoma, pce_ap, pce_ol, pce_ielp, pce (case insensitive) args: year (required) term_name (required): Winter|Spring|Summer|Autumn curriculum_abbreviation course_number section_id student_id (student number) instructor_id (employee identification number) returns: a list of Evaluation objects
def search_evaluations(domain, **kwargs):
    url = "{}?{}".format(IAS_PREFIX, urlencode(kwargs))
    data = get_resource(url, domain)
    evaluations = _json_to_evaluation(data)
    return evaluations
1,136,873
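Since the keyword arguments are urlencoded verbatim onto IAS_PREFIX, the parameter names in the docstring map one-to-one onto query-string keys. An illustrative query (all values are placeholders):

evaluations = search_evaluations(
    'seattle',                        # domain, case insensitive
    year=2019,
    term_name='Autumn',
    curriculum_abbreviation='CSE',
    course_number=142,
    section_id='A',
)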
Takes a value and converts it to an elasticsearch representation args: value: the value to convert ranges: the list of ranges obj: the object supplying label attributes (used by 'missing_obj') method: conversion method to use 'None': default -> converts the value to its json value 'missing_obj': adds attributes as if the value should have been an rdfclass object
def convert_value_to_es(value, ranges, obj, method=None):
    def sub_convert(val):
        if isinstance(val, BaseRdfDataType):
            return val.to_json
        elif isinstance(val, __MODULE__.rdfclass.RdfClassBase):
            return val.subject.sparql_uri
        return val

    if method == "missing_obj":
        rtn_obj = {
            "rdf_type": [rng.sparql_uri for rng in ranges],
            # pylint: disable=no-member
            "label": [getattr(obj, label)[0]
                      for label in LABEL_FIELDS
                      if hasattr(obj, label)][0]}
        try:
            rtn_obj['uri'] = value.sparql_uri
            rtn_obj["rdfs_label"] = NSM.nouri(value.sparql_uri)
        except AttributeError:
            rtn_obj['uri'] = "None Specified"
            rtn_obj['rdfs_label'] = sub_convert(value)
        rtn_obj['value'] = rtn_obj['rdfs_label']
        return rtn_obj
    return sub_convert(value)
1,136,923
Returns the elasticsearch index types for the obj args: rng_def: the range definition dictionary ranges: rdfproperty ranges
def get_idx_types(rng_def, ranges):
    idx_types = rng_def.get('kds_esIndexType', []).copy()
    if not idx_types:
        nested = False
        for rng in ranges:
            if range_is_obj(rng, __MODULE__.rdfclass):
                nested = True
        if nested:
            idx_types.append('es_Nested')
    return idx_types
1,136,924
Filters the range definitions based on the bound class args: class_names: the names of the classes the property is bound to def_list: the list of range definitions to filter
def get_prop_range_defs(class_names, def_list):
    try:
        cls_options = set(class_names + ['kdr_AllClasses'])
        return [rng_def for rng_def in def_list
                if not isinstance(rng_def, BlankNode)
                and cls_options.difference(
                    set(rng_def.get('kds_appliesToClass', []))) <
                cls_options]
    except AttributeError:
        return []
1,136,925
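The comparison cls_options.difference(applies_to) < cls_options is an indirect way of asking whether at least one bound class (or kdr_AllClasses) appears in kds_appliesToClass: removing the overlap leaves a proper subset only if something was actually removed. The set logic in isolation, with illustrative class names:

cls_options = {'schema_Person', 'kdr_AllClasses'}

print(cls_options.difference({'schema_Person'}) < cls_options)  # True: overlap
print(cls_options.difference({'schema_Event'}) < cls_options)   # False: none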
Returns the value for an object that goes into the elasticsearch 'value' field args: obj: data object to update def_obj: the class instance that has definition values
def get_es_value(obj, def_obj):
    def get_dict_val(item):
        if isinstance(item, dict):
            return str(item.get('value'))
        return str(item)

    if def_obj.es_defs.get('kds_esValue'):
        value_flds = def_obj.es_defs['kds_esValue'].copy()
    else:
        value_flds = list(set(obj).difference(__ALL_IGN__))
    value_flds += __COMBINED__
    try:
        obj['value'] = [obj.get(label)
                        for label in value_flds
                        if obj.get(label)][0]
    except IndexError:
        obj['value'] = ", ".join(
            ["%s: %s" % (value.get('label'), value.get('value'))
             for prop, value in obj.items()
             if isinstance(value, dict) and value.get('label')])
    if isinstance(obj['value'], list):
        obj['value'] = ", ".join([get_dict_val(item)
                                  for item in obj['value']])
    else:
        obj['value'] = get_dict_val(obj['value'])
    if str(obj['value']).strip().endswith("/"):
        obj['value'] = str(obj['value']).strip()[:-1].strip()
    if not obj['value']:
        obj['value'] = obj.get('uri', '')
    return obj
1,136,927
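The 'value' selection falls through three stages: an explicit kds_esValue field list, then any field not in the ignore set, and finally a joined label: value summary of nested dicts. The first populated field in the list wins, as this isolated sketch shows (field names are illustrative):

obj = {'schema_name': ['Example'], 'schema_alternateName': ['Alt']}
value_flds = ['schema_alternateName', 'schema_name']  # illustrative priority

value = [obj.get(label) for label in value_flds if obj.get(label)][0]
print(value)  # -> ['Alt']: first populated field in value_flds order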
Returns the object with a label for an object that goes into the elasticsearch 'label' field args: obj: data object to update def_obj: the class instance that has definition values
def get_es_label(obj, def_obj):
    label_flds = LABEL_FIELDS
    if def_obj.es_defs.get('kds_esLabel'):
        label_flds = def_obj.es_defs['kds_esLabel'] + LABEL_FIELDS
    try:
        for label in label_flds:
            if def_obj.cls_defs.get(label):
                obj['label'] = def_obj.cls_defs[label][0]
                break
        if not obj.get('label'):
            obj['label'] = def_obj.__class__.__name__.split("_")[-1]
    except AttributeError:
        # An AttributeError occurs when the class is only an instance of
        # BaseRdfClass. Fall back to constructing a label from the
        # rdf_type property.
        if def_obj.get('rdf_type'):
            obj['label'] = def_obj['rdf_type'][-1].value[-1]
        else:
            obj['label'] = "no_label"
    return obj
1,136,928