Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Returns the object updated with the 'id' and 'uri' fields for the elasticsearch document args: obj: data object to update def_obj: the class instance that has definition values
def get_es_ids(obj, def_obj): try: path = "" for base in [def_obj.__class__] + list(def_obj.__class__.__bases__): if hasattr(base, 'es_defs') and base.es_defs: path = "%s/%s/" % (base.es_defs['kds_esIndex'][0], base.es_defs['kds_esDocType'][0]) continue except KeyError: path = "" if def_obj.subject.type == 'uri': obj['uri'] = def_obj.subject.clean_uri obj['id'] = path + make_es_id(obj['uri']) elif def_obj.subject.type == 'bnode': obj['id'] = path + def_obj.bnode_id() else: obj['id'] = path + make_es_id(str(obj['value'])) return obj
1,136,929
Creates the id based on the uri value Args: ----- uri: the uri to convert to an elasticsearch id
def make_es_id(uri): try: uri = uri.clean_uri except AttributeError: pass return sha1(uri.encode()).hexdigest()
1,136,930
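A minimal usage sketch for make_es_id above, relying only on hashlib's sha1 (which the function body already uses); the URI is a made-up example:

from hashlib import sha1

def make_es_id(uri):
    try:
        uri = uri.clean_uri  # rdfframework URI objects expose .clean_uri
    except AttributeError:
        pass  # plain strings fall through unchanged
    return sha1(uri.encode()).hexdigest()

print(make_es_id("http://example.org/item/1"))  # 40-character hex digest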
Converts an input to one of the rdfdatatypes classes Args: value: any rdfdatatype, json dict or value class_type: "literal", "uri" or "blanknode" datatype: "xsd:string", "xsd:int", etc.
def pyrdf2(value, class_type=None, datatype=None, lang=None, **kwargs): try: if isinstance(value, dict): # test to see if the type is a literal; a literal will have another # dictionary key called 'datatype'. Feed the datatype to the lookup to # return the value, else convert it to an XsdString if value.get('type') == "literal": if not value.get("datatype"): return XsdString(value['value']) else: try: if value.get("lang"): # The lang keyword only applies to XsdString types return DT_LOOKUP[value['datatype']](value['value'], lang=value.get("lang")) else: return DT_LOOKUP[value['datatype']](value['value']) except: rtn_val = BaseRdfDataType(value['value']) rtn_val.datatype = Uri(value['datatype']) return rtn_val else: return DT_LOOKUP[value['type']](value['value']) elif isinstance(value, BaseRdfDataType): return value else: return DT_LOOKUP[type(value)](value) except: pdb.set_trace() pass
1,136,967
Converts an input to one of the rdfdatatypes classes Args: value: any rdfdatatype, json dict or value class_type: "literal", "uri" or "blanknode" datatype: "xsd:string", "xsd:int", etc. kwargs: lang: language tag
def pyrdf(value, class_type=None, datatype=None, **kwargs): if isinstance(value, BaseRdfDataType): return value if isinstance(value, dict): value = value.copy() class_type = value.pop('type') try: datatype = value.pop('datatype') except KeyError: datatype = __TYPE_MATCH__[class_type] kwargs = value value = kwargs.pop('value') if not class_type: class_type = 'literal' if not datatype: datatype = type(value) try: # print("pyrdf: ", value, " class_type: ", class_type, " datatype: ", datatype) return __DT_LOOKUP__[class_type][datatype](value, **kwargs) except KeyError: rtn_val = BaseRdfDataType(value) rtn_val.datatype = Uri(datatype) return rtn_val
1,136,968
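For illustration, a SPARQL-JSON-style binding dict would flow through the dict branch of pyrdf above; the exact return class depends on what is registered in __DT_LOOKUP__, so the outcome is stated as a comment rather than asserted:

binding = {"type": "literal",
           "value": "2021-01-01",
           "datatype": "http://www.w3.org/2001/XMLSchema#date"}
result = pyrdf(binding)
# -> a date datatype instance if the datatype key is in __DT_LOOKUP__['literal'],
#    otherwise a BaseRdfDataType with .datatype set to Uri(datatype)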
Creates an instance of a component descriptor Args: group: logical group: 'pip-services-runtime', 'pip-services-logging' type: external type: 'cache', 'services' or 'controllers' kind: implementation: 'memory', 'file' or 'memcached' name: internal content version: compatibility version: '1.0', '1.5' or '10.4'
def __init__(self, group, tipe, kind, name, version): group = None if "*" == group else group tipe = None if "*" == tipe else tipe kind = None if "*" == kind else kind name = None if "*" == name else name version = None if "*" == version else version self._group = group self._type = tipe self._kind = kind self._name = name self._version = version
1,137,009
Matches this descriptor to another descriptor exactly. Args: descriptor: another descriptor to match this one. Returns: True if descriptors match or False otherwise.
def exact_match(self, descriptor): return self._exact_match_field(self._group, descriptor.get_group()) \ and self._exact_match_field(self._type, descriptor.get_type()) \ and self._exact_match_field(self._kind, descriptor.get_kind()) \ and self._exact_match_field(self._name, descriptor.get_name()) \ and self._exact_match_field(self._version, descriptor.get_version())
1,137,010
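A self-contained sketch of the per-field comparison these methods imply; _exact_match_field is not shown in this dump, so the version below is an assumption based on the '*'-to-None normalization in __init__:

def exact_match_field(field1, field2):
    # "*" was stored as None by the constructor, so None matches anything
    return field1 is None or field1 == field2

print(exact_match_field(None, "memory"))    # True  (wildcard)
print(exact_match_field("cache", "cache"))  # True  (exact)
print(exact_match_field("cache", "logs"))   # False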
Constructor args: **kwargs: degree: int, default=1 degree of polynomial fitting resolution: int, default=1000
def __init__(self, **kwargs): BestFit.__init__(self, **kwargs) # set defaults self.set_defaults(degree=1, resolution=1000)
1,137,036
Returns the bestfit_y value args: bestfit_x: scalar or array_like x value returns: scalar or array_like bestfit y value
def bestfit_func(self, bestfit_x): bestfit_x = np.array(bestfit_x) if not self.done_bestfit: raise KeyError("Do do_bestfit first") bestfit_y = 0 for idx, val in enumerate(self.fit_args): bestfit_y += val * (bestfit_x ** (self.args.get("degree", 1) - idx)) return bestfit_y
1,137,038
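The loop in bestfit_func evaluates sum(fit_args[idx] * x**(degree - idx)), i.e. coefficients ordered from the highest power down (numpy.polyfit's convention). A standalone check with made-up coefficients:

import numpy as np

fit_args = [2.0, -1.0, 3.0]          # hypothetical fit: 2x^2 - x + 3
degree = 2
x = np.array([0.0, 1.0, 2.0])
y = sum(val * x ** (degree - idx) for idx, val in enumerate(fit_args))
print(y)                             # [3. 4. 9.]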
Get the SHA a branch's HEAD points to. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch. Returns: The requested SHA.
def get_branch_sha(profile, name): ref = "heads/" + name data = refs.get_ref(profile, ref) head = data.get("head") sha = head.get("sha") return sha
1,137,079
Fetch a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch to fetch. Returns: A dict with data about the branch.
def get_branch(profile, name): ref = "heads/" + name data = refs.get_ref(profile, ref) return data
1,137,080
Create a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the new branch. branch_off The name of a branch to create the new branch off of. Returns: A dict with data about the new branch.
def create_branch(profile, name, branch_off): branch_off_sha = get_branch_sha(profile, branch_off) ref = "heads/" + name data = refs.create_ref(profile, ref, branch_off_sha) return data
1,137,081
Move a branch's HEAD to a new SHA. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch to update. sha The commit SHA to point the branch's HEAD to. Returns: A dict with data about the branch.
def update_branch(profile, name, sha): ref = "heads/" + name data = refs.update_ref(profile, ref, sha) return data
1,137,082
Delete a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the branch to delete. Returns: The response of the DELETE request.
def delete_branch(profile, name): ref = "heads/" + name data = refs.delete_ref(profile, ref) return data
1,137,083
Merge a branch into another branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch The name of the branch to merge. merge_into The name of the branch you want to merge into. Returns: A dict with data about the merge.
def merge(profile, branch, merge_into): data = merges.merge(profile, branch, merge_into) return data
1,137,084
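A hypothetical end-to-end run of the branch helpers above; the profile shape is an assumption based on the docstrings (a mapping carrying 'repo' and 'token'), not a documented simplygithub structure:

profile = {"repo": "owner/repo", "token": "<personal-access-token>"}  # assumed shape

branch = create_branch(profile, "feature-x", branch_off="master")
print(branch)                              # dict with data about the new branch
sha = get_branch_sha(profile, "feature-x")
update_branch(profile, "feature-x", sha)   # repoint HEAD (a no-op here)
merge(profile, "feature-x", merge_into="master")
delete_branch(profile, "feature-x")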
Get the installed version for this project. Args: vcs (easyci.vcs.base.Vcs) Returns: str - version number Raises: VersionNotInstalledError
def get_installed_version(vcs): version_path = _get_version_path(vcs) if not os.path.exists(version_path): raise VersionNotInstalledError with open(version_path, 'r') as f: return f.read().strip()
1,137,247
Set the installed version for this project. Args: vcs (easyci.vcs.base.Vcs) version (str)
def set_installed_version(vcs, version): version_path = _get_version_path(vcs) with open(version_path, 'w') as f: f.write(version)
1,137,248
Create a new instance of :class:`PygalleBaseClass` # Arguments args: kwargs: # Returns: PygalleBaseClass: An instance of :class:`PygalleBaseClass`
def __init__(self, **kwargs) -> 'PygalleBaseClass': # pylint: disable=unused-argument self.options = kwargs self.init_properties() \ .set_uid() \ .set_class_name() \ .set_category()
1,137,302
Save results matching `patterns` at `result_path`. Args: vcs (easyci.vcs.base.Vcs) - the VCS object for the actual project (not the disposable copy) signature (str) - the project state signature result_path (str) - the path containing the result, usually a disposable copy of the project patterns (str) - `rsync`-compatible patterns matching test results to save.
def save_results(vcs, signature, result_path, patterns): results_directory = _get_results_directory(vcs, signature) if not os.path.exists(results_directory): os.makedirs(results_directory) with open(os.path.join(results_directory, 'patterns'), 'w') as f: f.write('\n'.join(patterns)) if not os.path.exists(os.path.join(results_directory, 'results')): os.mkdir(os.path.join(results_directory, 'results')) includes = ['--include={}'.format(x) for x in patterns] cmd = ['rsync', '-r'] + includes + ['--exclude=*', os.path.join(result_path, ''), os.path.join(results_directory, 'results', '')] subprocess.check_call(cmd)
1,137,450
Sync the saved results for `signature` back to the project. Args: vcs (easyci.vcs.base.Vcs) signature (str) Raises: ResultsNotFoundError
def sync_results(vcs, signature): results_directory = _get_results_directory(vcs, signature) if not os.path.exists(results_directory): raise ResultsNotFoundError with open(os.path.join(results_directory, 'patterns'), 'r') as f: patterns = f.read().strip().split() includes = ['--include={}'.format(x) for x in patterns] cmd = ['rsync', '-r'] + includes + ['--exclude=*', os.path.join( results_directory, 'results', ''), os.path.join(vcs.path, '')] subprocess.check_call(cmd)
1,137,451
Remove saved results for this signature Args: vcs (easyci.vcs.base.Vcs) signature (str) Raises: ResultsNotFoundError
def remove_results(vcs, signature): results_directory = _get_results_directory(vcs, signature) if not os.path.exists(results_directory): raise ResultsNotFoundError shutil.rmtree(results_directory)
1,137,452
Returns the list of signatures for which test results are saved. Args: vcs (easyci.vcs.base.Vcs) Returns: List[str]
def get_signatures_with_results(vcs): results_dir = os.path.join(vcs.private_dir(), 'results') if not os.path.exists(results_dir): return [] rel_paths = os.listdir(results_dir) return [p for p in rel_paths if os.path.isdir(os.path.join(results_dir, p))]
1,137,453
Registers the given message type in the local database. Args: message: a message.Message, to be registered. Returns: The provided message.
def RegisterMessage(self, message): desc = message.DESCRIPTOR self._symbols[desc.full_name] = message if desc.file.name not in self._symbols_by_file: self._symbols_by_file[desc.file.name] = {} self._symbols_by_file[desc.file.name][desc.full_name] = message self.pool.AddDescriptor(desc) return message
1,137,772
Gets all the messages from a specified file. This will find and resolve dependencies, failing if they are not registered in the symbol database. Args: files: The file names to extract messages from. Returns: A dictionary mapping proto names to the message classes. This will include any dependent messages as well as any messages defined in the same file as a specified message. Raises: KeyError: if a file could not be found.
def GetMessages(self, files): result = {} for f in files: result.update(self._symbols_by_file[f]) return result
1,137,773
Create a new Event instance. Params: name (string): Full event name context (bubbler.Bubbler): Bubbler context
def __init__(self, name, context): self.name = name self.context = context self.handlers = [] self.all_handlers = [] self.executed = 0 self.all_executed = 0 self.result = [] self.all_result = [] self.stopped = False
1,137,863
Create a new Bubbler context Params: context_name (string): Name of this context Raises: bubbler.Error: If this context name already exists
def __init__(self, context_name = 'default'): if context_name in self.contexts: raise Error("A context named '%s' already exists" % (context_name,)) self.name = context_name self.handlers = {} self.contexts[self.name] = self
1,137,865
Get a context by name, create the default context if it does not exist Params: context_name (string): Context name Raises: KeyError: If the context name does not exist Returns: bubbler.Bubbler: Named context
def getContext(self, context_name = 'default'): if context_name == 'default' and 'default' not in self.contexts: self('default') return self.contexts[context_name]
1,137,866
Bind a callback to an event Params: event_name (string): Name of the event to bind to callback (callable): Callback that will be called when the event is triggered first (boolean): If True, this callback is placed before all the other events already registered for this event, otherwise it is placed at the end.
def bind(self, event_name, callback, first = False): if event_name not in self.handlers: self.handlers[event_name] = [] if first: self.handlers[event_name].insert(0, callback) else: self.handlers[event_name].append(callback)
1,137,867
Unbind a callback from an event Params: callback (callable): Callback to unbind event_name (string): If None (default) this callback is removed from every event to which it's bound. If a name is given then it is only removed from that event, if it is bound to that event.
def unbind(self, callback, event_name = None): if event_name is None: for name in self.handlers.keys(): self.unbind(callback, name) return if callback in self.handlers[event_name]: self.handlers[event_name].remove(callback)
1,137,868
Trigger an event on this context. Params: event_name (string): Event name to trigger Args and kwargs are passed to each handler - see the bubbler.Event class for more information. Returns: bubbler.Event: Event instance after execution of all handlers
def trigger(self, event_name, *args, **kwargs): ev = Event(event_name, self) ev.trigger(*args, **kwargs) return ev
1,137,869
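A usage sketch tying bind() and trigger() together, assuming the Bubbler and Event classes above are importable and that Event.trigger runs each bound handler in order:

bus = Bubbler("docs")                      # new named context

def on_save(*args, **kwargs):
    print("saved:", args, kwargs)

bus.bind("document.save", on_save)
event = bus.trigger("document.save", "report.txt", user="alice")
print(event.executed)                      # handler count, per the Event attributes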
Object initialization Args: key: String name of an attributes key that represents the unique identity of the request attributes: Dictionary whose keys match the string values of the request attribute's names and values correspond to the request attribute values
def __init__(self, key, attributes): self._attributes_normalized = {} self._set_attributes(attributes if attributes else {}) self._key_normalized = '' self._set_key(key)
1,138,052
Pick information from :class:`.MARCXMLRecord` object and use it to build :class:`.SemanticInfo` structure. Args: xml (str/MARCXMLRecord): MarcXML which will be converted to SemanticInfo. In case of str, ``<record>`` tag is required. Returns: structure: :class:`.SemanticInfo`.
def from_xml(xml): hasAcquisitionFields = False acquisitionFields = [] isClosed = False isSummaryRecord = False contentOfFMT = "" parsedSummaryRecordSysNumber = "" summaryRecordSysNumber = "" parsed = xml if not isinstance(xml, MARCXMLRecord): parsed = MARCXMLRecord(str(xml)) # handle FMT record if "FMT" in parsed.controlfields: contentOfFMT = parsed["FMT"] if contentOfFMT == "SE": isSummaryRecord = True if "HLD" in parsed.datafields or "HLD" in parsed.controlfields: hasAcquisitionFields = True if "STZ" in parsed.datafields: acquisitionFields.extend(parsed["STZa"]) acquisitionFields.extend(parsed["STZb"]) def sign_and_author(sign): return [sign.replace(" ", "")] + sign.other_subfields.get("b", []) # look for catalogization fields for orig_sign in parsed["ISTa"]: sign = orig_sign.replace(" ", "") # remove spaces if sign.startswith("sk"): hasAcquisitionFields = True acquisitionFields.extend(sign_and_author(orig_sign)) # look whether the record was 'closed' by catalogizators for status in parsed["BASa"]: if status == "90": isClosed = True # if multiple PJM statuses are present, join them together status = "\n".join([x for x in parsed["PJMa"]]) # detect link to 'new' record, if the old one was 'closed' if status.strip(): summaryRecordSysNumber = status parsedSummaryRecordSysNumber = _parse_summaryRecordSysNumber( summaryRecordSysNumber ) return EPeriodicalSemanticInfo( hasAcquisitionFields=hasAcquisitionFields, acquisitionFields=acquisitionFields, isClosed=isClosed, isSummaryRecord=isSummaryRecord, contentOfFMT=contentOfFMT, parsedSummaryRecordSysNumber=parsedSummaryRecordSysNumber, summaryRecordSysNumber=summaryRecordSysNumber, )
1,138,153
Compress `path` to the ZIP. Args: path (str): Path to the directory. Returns: str: Path to the zipped file (in /tmp).
def path_to_zip(path): if not os.path.exists(path): raise IOError("%s doesn't exists!" % path) with tempfile.NamedTemporaryFile(delete=False) as ntf: zip_fn = ntf.name with zipfile.ZipFile(zip_fn, mode="w") as zip_file: for root, dirs, files in os.walk(path): for fn in files: zip_file.write(os.path.join(root, fn)) return zip_fn
1,138,225
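A quick usage sketch for path_to_zip, creating a throwaway directory first so the call is reproducible:

import os
import tempfile

src = tempfile.mkdtemp()
with open(os.path.join(src, "hello.txt"), "w") as f:
    f.write("hi")

zip_path = path_to_zip(src)
print(os.path.exists(zip_path))  # True; the archive is a named temp file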
Convert given `fn` to base64 and return it. This method does the conversion in a memory-efficient way. Args: fn (str): Path to the file which should be converted. Returns: str: File encoded as base64.
def read_as_base64(fn): with open(fn, 'rb') as unpacked_file: with tempfile.TemporaryFile() as b64_file: base64.encode(unpacked_file, b64_file) b64_file.flush() b64_file.seek(0) return b64_file.read()
1,138,226
Validate rule name. Arguments: name (string): Rule name. Returns: bool: ``True`` if rule name is valid.
def validate_rule_name(self, name): if not name: raise SerializerError("Rule name is empty") if name[0] not in RULE_ALLOWED_START: msg = "Rule name '{}' must start with a letter" raise SerializerError(msg.format(name)) for item in name: if item not in RULE_ALLOWED_CHARS: msg = ("Invalid rule name '{}': it must only contain " "letters, numbers and the '_' character") raise SerializerError(msg.format(name)) return True
1,138,255
Validate variable name. Arguments: name (string): Property name. Returns: bool: ``True`` if variable name is valid.
def validate_variable_name(self, name): if not name: raise SerializerError("Variable name is empty") if name[0] not in PROPERTY_ALLOWED_START: msg = "Variable name '{}' must start with a letter" raise SerializerError(msg.format(name)) for item in name: if item not in PROPERTY_ALLOWED_CHARS: msg = ("Invalid variable name '{}': it must only contain " "letters, numbers and the '_' character") raise SerializerError(msg.format(name)) return True
1,138,256
Serialize given datas to any object from assumed JSON string. Arguments: name (string): Name only used inside possible exception message. datas (dict): Datas to serialize. Returns: object: Object depending from JSON content.
def serialize_to_json(self, name, datas): data_object = datas.get('object', None) if data_object is None: msg = ("JSON reference '{}' lacks the required 'object' variable") raise SerializerError(msg.format(name)) try: content = json.loads(data_object, object_pairs_hook=OrderedDict) except json.JSONDecodeError as e: msg = "JSON reference '{}' raised error from JSON decoder: {}" raise SerializerError(msg.format(name, e)) else: return content
1,138,258
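For example (serializer is an assumed instance of the class these methods belong to):

datas = {"object": '{"palette": {"primary": "#0050ff"}}'}
content = serializer.serialize_to_json("colors", datas)
# -> OrderedDict([('palette', OrderedDict([('primary', '#0050ff')]))])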
Serialize given datas to a nested structure where each key creates an item and each other variable is stored as a subitem with corresponding value (according to key index position). Arguments: name (string): Name only used inside possible exception message. datas (dict): Datas to serialize. Returns: dict: Nested dictionary of serialized reference datas.
def serialize_to_nested(self, name, datas): keys = datas.get('keys', None) splitter = datas.get('splitter', self._DEFAULT_SPLITTER) if not keys: msg = ("Nested reference '{}' lacks the required 'keys' variable " "or is empty") raise SerializerError(msg.format(name)) else: keys = self.value_splitter(name, 'keys', keys, mode=splitter) # Initialize context dict with reference keys context = OrderedDict() for k in keys: context[k] = OrderedDict() # Tidy each variable value to its respective item for k, v in datas.items(): # Ignore reserved internal keywords if k not in ('keys', 'structure', 'splitter'): values = self.value_splitter(name, 'values', v, mode=splitter) if len(values) != len(keys): msg = ("Nested reference '{}' has a different length for " "values of '{}' and 'keys'") raise SerializerError(msg.format(name, k)) # Put each value to its respective key using position index. for i, item in enumerate(values): ref = keys[i] context[ref][k] = item return context
1,138,259
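A worked example of the nesting rule, assuming the default splitter is plain whitespace: each key becomes an item and every other variable is distributed across the items by position:

datas = {
    "keys": "small large",
    "width": "10px 100px",
    "height": "20px 200px",
}
# serialize_to_nested("sizes", datas) would then yield:
# {"small": {"width": "10px", "height": "20px"},
#  "large": {"width": "100px", "height": "200px"}}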
Serialize given datas to a list structure. The list structure is very simple and only requires a variable ``--items`` which is a string of values separated with an empty space. Every other property is ignored. Arguments: name (string): Name only used inside possible exception message. datas (dict): Datas to serialize. Returns: list: List of serialized reference datas.
def serialize_to_list(self, name, datas): items = datas.get('items', None) splitter = datas.get('splitter', self._DEFAULT_SPLITTER) if items is None: msg = ("List reference '{}' lacks the required 'items' variable " "or is empty") raise SerializerError(msg.format(name)) else: items = self.value_splitter(name, 'items', items, mode=splitter) return items
1,138,261
Serialize given datas to a string. Simply return the value from the required variable ``value``. Arguments: name (string): Name only used inside possible exception message. datas (dict): Datas to serialize. Returns: string: Value.
def serialize_to_string(self, name, datas): value = datas.get('value', None) if value is None: msg = ("String reference '{}' lacks the required 'value' variable " "or is empty") raise SerializerError(msg.format(name)) return value
1,138,262
Get available manifest reference names. Every rule starting with the prefix from ``nomenclature.RULE_REFERENCE`` is an available reference. Only name validation is performed on these references. Arguments: datas (dict): Data where to search for reference declarations. Returns: list: List of every available reference name. This is the real name, unprefixed.
def get_available_references(self, datas): names = [] for k, v in datas.items(): if k.startswith(RULE_REFERENCE): names.append(k[len(RULE_REFERENCE)+1:]) return names
1,138,265
Get enabled manifest references declarations. Enabled references are defined through meta references declaration, every other references are ignored. Arguments: datas (dict): Data where to search for reference declarations. This is commonly the fully parsed manifest. meta_references (list): List of enabled reference names. Returns: collections.OrderedDict: Serialized enabled references datas.
def get_enabled_references(self, datas, meta_references): references = OrderedDict() for section in meta_references: references[section] = self.get_reference(datas, section) return references
1,138,266
Serialize datas to manifest structure with metas and references. Only references are returned, metas are assigned to attribute ``ManifestSerializer._metas``. Arguments: datas (dict): Data where to search for reference declarations. This is commonly the fully parsed manifest. Returns: collections.OrderedDict: Serialized enabled references datas.
def serialize(self, datas): self._metas = OrderedDict({ 'references': self.get_meta_references(datas), }) return self.get_enabled_references(datas, self._metas['references'])
1,138,267
Updates the value of the given key in the file. Args: key (str): Key of the property to update. value (str): New value of the property. Return: bool: Indicates whether or not a change was made.
def set(self, key, value): changed = super().set(key=key, value=value) if not changed: return False self._log.info('Saving configuration to "%s"...', self._filename) with open(self._filename, 'w') as stream: stream.write(self.content) self._log.info('Saved configuration to "%s".', self._filename) return True
1,138,707
tests the validity of the attribute supplied in the config args: attr: the name of the attribute req: the requirement definition config: the config obj
def test_attr(attr, req, parent_attr=None, **kwargs): def test_format(attr_val, fmat): rtn_obj = {"reason": "format"} if fmat == 'url': if not reg_patterns.url.match(attr_val): if reg_patterns.url_no_http.match(attr_val): return "missing http or https" return "invalid url format" if fmat == "namespace": uri = clean_iri(attr_val) if not reg_patterns.url.match(uri): return "invalid uri format" elif uri.strip()[-1] not in ["/", "#"]: return "does not end with / or #" if fmat == "directory": env = 'win' if os.path.sep == '/': env = 'linux' if env == 'linux': if not reg_patterns.dir_linux.match(attr_val): if not reg_patterns.dir_win.match(attr_val): return "invalid directory path" log.warning("linux/mac env: windows directory path %s", attr_val) if env == 'win': if not reg_patterns.dir_win.match(attr_val): if not reg_patterns.dir_linux.match(attr_val): return "invalid directory path" log.warning("windows env: linux/mac directory path %s", attr_val) if fmat == "writable": try: if not is_writable_dir(attr_val, mkdir=True): return "path is not writable" except: return "path is not writable" return None rtn_obj = {} try: if IgnoreClass in attr: return except TypeError: pass req_key = None if req.get("req_items"): req_key = get_req_key(req['req_items']) if req.get("required") and attr in [None, '']: if "default" in req: rtn_obj["set"] = req['default'] rtn_obj["reason"] = "using default" else: rtn_obj["reason"] = "missing" rtn_obj['msg'] = "missing required item" return rtn_obj if attr is None: return {} if not isinstance(attr, req['type']): rtn_obj.update({"reason": "type", "msg": "should be of type %s" % req['type']}) return rtn_obj if req['type'] == list: error_list = [] if req['item_type'] == str and req.get("format"): fmat = req['format'] for item in attr: msg = test_format(item, fmat) if msg: error_list.append(msg) elif req['item_type'] == dict: for idx, item in enumerate(attr): item_errors = [] dict_errors = {} if req['item_dict'].get("req_items"): req_key = get_req_key(req['item_dict']['req_items']) new_req = update_req(item.get(req_key), req) for key, item_req in new_req['item_dict'].items(): msg = test_attr(item.get(key), item_req, item) if msg: msg['value'] = item.get(key) dict_errors[key] = msg if dict_errors: item_copy = copy.deepcopy(item) item_copy.update(dict_errors) item_copy['__list_idx__'] = idx item_copy['__error_keys__'] = list(dict_errors) error_list.append(item_copy) req_key = None req_values = [] if req.get("req_items") and not kwargs.get("skip_req_items"): # determine the matching key req_key = get_req_key(req['req_items']) for item in req['req_items']: value = item[req_key] req_values.append(value) if value not in [item[req_key] for item in attr]: error_item = { "__list_idx__": None, "__msg__": "%s: '%s' is a required item" % (req_key, value), "__dict_params__": item.get("dict_params"), "__error_keys__": [ky for ky, val in req['item_dict'].items() if val.get("required") and ky != req_key and ky not in item.get("default_values", {})]} missing_dict = {"msg": "required", "reason": "missing", "value": None } error_req = {ky: missing_dict for ky, val in req['item_dict'].items() if val.get("required")} error_req[req_key] = value for def_ky, def_val in item.get("default_values", {}).items(): error_req[def_ky] = def_val error_item.update(error_req) error_list.append(error_item) if req.get("optional_items"): if not req_key: req_key = [key for key in req['optional_items'][0].keys() if key != "description"][0] optional_values = [item[req_key] for item in req['optional_items']] optional_values = set(optional_values + req_values) cfg_values = set([item[req_key] for item in attr]) not_allowed = cfg_values.difference(optional_values) if not_allowed: for name in not_allowed: idx, item = [(i, val) for i, val in enumerate(attr) if val[req_key] == name][0] error_item = { "__list_idx__": idx, "__msg__": "'%s: %s' is not an allowed item. Allowed items are: %s" % (req_key, name, list(optional_values)), "__error_keys__": [ky for ky, val in req['item_dict'].items() if val.get("required") and ky != req_key]} missing_dict = {"msg": "Not an allowed option", "reason": "not_allowed", "value": None } error_req = {ky: missing_dict for ky, val in req['item_dict'].items() if val.get("required")} error_req[req_key] = name error_item.update(error_req) error_list.append(error_item) if error_list: rtn_obj.update({"reason": "list_error", "items": error_list}) return rtn_obj return rtn_obj if req['type'] == dict: if req.get('item_type') == str and req.get("format"): fmat = req['format'] # error_list = [] error_dict = {} for key, item in attr.items(): msg = test_format(item, fmat) if msg: error_dict[key] = {"value": item, "msg": msg} # error_list.append({key: msg}) elif req.get('item_type') == dict: for item, value in attr.items(): item_errors = [] new_req_dict = update_req(item.get(req_key), req) for key, item_req in new_req_dict.items(): msg = test_attr(value.get(key), item_req, value) if msg: item_errors.append({value.get(key): msg}) if item_errors: error_list.append({item: item_errors}) # if item_type is elif not req.get("item_type"): # if isinstance(attr, dict) and isinstance(parent_attr, dict) # parent_attr.update(attr) error_dict = {} if error_dict: rtn_obj.update({"reason": "dict_error", "items": error_dict}) return rtn_obj return if req.get("format"): fmat = req['format'] rtn_obj["reason"] = "format" msg = test_format(attr, fmat) if msg: rtn_obj.update({"msg": msg, "value": attr}) return rtn_obj if req.get("options"): options = get_options_from_str(req['options'], **parent_attr) msg = None if attr not in options: msg = "'%s' is not an allowed option, choose from %s" % (attr, options) if msg: rtn_obj.update({"msg": msg, "value": attr}) return rtn_obj
1,138,859
Takes a requirement and updates it based on a specific attribute key args: name: the name of the attribute old_req: the requirement definition
def update_req(name, old_req, config={}): if not name: return old_req new_req = copy.deepcopy(old_req) del_idxs = [] if "req_items" in old_req: req_key = get_req_key(old_req['req_items']) for i, item in enumerate(old_req['req_items']): if name == item[req_key] and item.get("dict_params"): for param, value in item['dict_params'].items(): new_req['item_dict'][param].update(value) if item.get("remove_if"): test_val = get_attr(config, item['remove_if']['attr']) if test_val == item['remove_if']['value']: del_idxs.append(i) for idx in sorted(del_idxs, reverse=True): del new_req['req_items'][idx] return new_req
1,138,860
Returns a list of options from a python object string args: obj_str: python list of options or a python object path Example: "rdfframework.connections.ConnManager[{param1}]" kwargs: * kwargs used to format the 'obj_str'
def get_options_from_str(obj_str, **kwargs): if isinstance(obj_str, list): return obj_str try: obj = get_obj_frm_str(obj_str, **kwargs) if obj: return list(obj) except AttributeError: pass return []
1,138,861
Reads through an error object and replaces each error dict with its value args: obj: the error object/dictionary
def strip_errors(obj): rtn_obj = copy.deepcopy(obj) try: del rtn_obj["__error_keys__"] except KeyError: pass for key in obj.get('__error_keys__', []): rtn_obj[key] = rtn_obj[key]['value'] return rtn_obj
1,138,862
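A worked example of strip_errors on a shape matching what test_attr produces (one error key plus untouched values):

err_obj = {
    "__error_keys__": ["port"],
    "port": {"value": 8080, "msg": "missing", "reason": "missing"},
    "host": "localhost",
}
print(strip_errors(err_obj))  # {'port': 8080, 'host': 'localhost'}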
Reads a python config file and initializes the class args: config: the config data
def __load_config__(self, config, **kwargs): # The configuration cannot be initialised more than once if self.__is_initialized__: raise ImportError("RdfConfigManager has already been initialized") self.__set_cfg_reqs__(**kwargs) self.__set_cfg_attrs__(config, **kwargs) self.__config__['TURN_ON_VOCAB'] = kwargs.get("turn_on_vocab", self.__config__.get("TURN_ON_VOCAB", self.__cfg_reqs__["TURN_ON_VOCAB"]['default'])) errors = self.__verify_config__(self.__config__, **kwargs) self.__reslove_errors__(errors, **kwargs) log.info("CONFIGURATION validated") self.__is_initialized__ = True log.info("Initializing directories") self.__initialize_directories__(**kwargs) log.info("Setting Logging") self.__set_logging__(**kwargs) log.info("setting RDF Namespaces") self.__load_namespaces__(**kwargs) log.info("Initializing Connections") self.__initialize_conns__(**kwargs) log.info("Registering RML Mappings") self.__register_mappings__(**kwargs) self.__run_defs__(**kwargs)
1,138,865
reads through the config object and validates the arguments, recording any that are missing or invalid args: config: the config object
def __verify_config__(self, config, **kwargs): log.info("Verifying config settings") error_dict = OrderedDict() for attr, req in self.__cfg_reqs__.items(): req = update_req(attr, req, config) result = test_attr(get_attr(config, attr), req) if result: if 'value' not in result \ and result['reason'] not in ['dict_error', 'list_error']: result['value'] = get_attr(config, attr) error_dict[attr] = result return error_dict
1,138,870
Determines how to deal with any config issues and resolves them args: errors: list of config errors
def __reslove_errors__(self, errors={}, **kwargs): def process_entry(req_type, new_value, old_value=''): new_value = new_value.strip() if new_value == '': return old_value elif new_value.lower() == 'help()': print(format_multiline(__MSGS__['help'])) return old_value elif new_value.lower() == 'clear()': return ClearClass elif new_value.lower() == 'none()': return None elif new_value.lower() == 'ignore()': rtn_val = (IgnoreClass, old_value) return rtn_val try: return req_type(new_value) except: try: return eval(new_value) except: raise def get_missing(self, attr): req = self.__cfg_reqs__[attr] errors = {"msg": '', "value": ''} if req['type'] == str: while True: err = '' if errors['msg']: err = "{} [{}]".format(colors.warning(errors['msg']), colors.fail(value)) print("Enter {attr}: {desc} {err}" "".format(attr=colors.fail(attr), desc=colors.cyan(req['description']), err=err)) value = input('-> ') try: value = process_entry(req['type'], value, errors['value']) errors = test_attr(value, req) if not errors and value != '': return value errors['value'] = value except SyntaxError: pass elif req['type'] == list: return [] def fix_format(self, attr, error, value=None): req = self.__cfg_reqs__[attr] if req['type'] == str: while True: print("{err} {attr}: {desc} Error: {error}" "\n\tEnter corrected value [{val}]" "".format(err=colors.fail("ERROR"), attr=colors.fail(attr), desc=colors.cyan(req['description']), error=colors.warning(error.get("msg")), val=colors.fail(value))) val = input('-> ') try: val = process_entry(req['type'], val, value) new_err = test_attr({attr: val}, req) if not new_err: return val except SyntaxError: pass elif req['type'] == list: return [] def fix_str(self, attr, key, value): req = self.__cfg_reqs__[attr] while True: print("{err} {key} | value: {val} | *** {msg}\n\t{desc}\n - " "Enter corrected value [{val}]: " "".format(err=colors.fail("ERROR"), key=colors.warning(key), val=colors.fail(value['value']), msg=colors.yellow(value['msg']), desc=colors.green(req['description']))) new_val = input("-> ") try: new_val = process_entry(req['type'], new_val, value) errors = test_attr({key: new_val}, req) if not errors: return new_val value = errors['items'][key] except SyntaxError: pass def fix_item(self, req, obj): for key, val in req['item_dict'].items(): while True: new_req = copy.deepcopy(req) errors = test_attr([strip_errors(obj)], new_req, skip_req_items=True) for ky in errors.get('items', [{}])[0].get('__error_keys__', []): obj[ky] = errors['items'][0][ky] if errors: obj['__error_keys__'] = errors['items'][0]['__error_keys__'] else: idx = obj.get('__list_idx__') obj = strip_errors(obj) obj.update({'__list_idx__': idx, '__error_keys__': []}) desc = format_multiline(val.get("description", "")) desc_items = ["%s: %s" % (i_key, colors.cyan(format_multiline(i_val))) for i_key, i_val in sorted(val.items()) if i_key.lower() not in ["doc", "options"]] if val.get("doc"): try: doc = get_obj_frm_str(val['doc'], **obj) except AttributeError: doc = None if doc: desc_items.append("__doc__: %s" % doc.__doc__) if val.get("options"): options = get_options_from_str(val['options'], **obj) desc_items.append("options: %s" % colors.warning(options)) desc = "\n\t".join(desc_items) if isinstance(obj.get(key), dict) and (obj[key].get('msg') or obj[key].get('reason')): print("{err} {key} | value: {val} | *** {msg}\n\t{desc}\n - Enter corrected value [{val}]: ".format( err=colors.fail("ERROR"), key=colors.warning(key), val=colors.fail(obj[key]['value']), msg=colors.yellow(obj[key].get('msg') or obj[key].get('reason')), desc=colors.green(desc))) new_val = input("-> ") try: new_val = process_entry(val['type'], new_val, obj[key]['value']) except (SyntaxError, NameError): obj[key] = {"msg": "SyntaxError", "value": new_val} continue # new_val = new_val or obj[key]['value'] else: print("{ok} {key} | value: {val}\n\t{desc}\n - Enter to keep current value [{val}]: ".format( ok=colors.green("OK"), key=colors.lcyan(key), val=colors.green(obj.get(key)), desc=desc)) new_val = input("-> ") try: new_val = process_entry(val['type'], new_val, obj.get(key)) except SyntaxError: obj[key] = {"msg": "SyntaxError", "value": new_val} continue errors = test_attr(new_val, val, obj) if not errors: if key == 'kwargs' and new_val: obj.update(new_val) try: del obj['kwargs'] except KeyError: pass else: obj[key] = new_val try: obj['__error_keys__'].remove(key) except ValueError: pass errors = test_attr([strip_errors(obj)], new_req, skip_req_items=True) if key not in errors.get('items', [{}])[0].get('__error_keys__', []): break else: errors["value"] = new_val obj[key] = errors return {key: value for key, value in obj.items() if not key.startswith("__")} def cycle_errors(self, errors, cfg_obj): for attr, err in errors.items(): if err.get("set"): cfg_obj[attr] = err['set'] elif err['reason'] == "format": cfg_obj[attr] = fix_format(self, attr, err, cfg_obj.get(attr)) elif err['reason'] == "missing": cfg_obj[attr] = get_missing(self, attr) elif err['reason'] == "list_error": req = self.__cfg_reqs__[attr] #['item_dict'] print("Correcting list items for configuration item: \n\n", "***", attr, "****\n") for item in err['items']: new_item = fix_item(self, req, item) if item['__list_idx__'] is None: try: cfg_obj[attr].append(new_item) except KeyError: cfg_obj[attr] = [new_item] else: cfg_obj[attr][item['__list_idx__']] = new_item elif err['reason'] == "dict_error": if self.__cfg_reqs__[attr]['item_type'] == dict: req = self.__cfg_reqs__[attr] #['item_dict'] elif self.__cfg_reqs__[attr]['item_type'] == str: req = self.__cfg_reqs__[attr] print("Correcting dictionary for item:\n\n", colors.warning("**** %s ****\n" % attr)) for item, val in err['items'].items(): new_val = fix_str(self, attr, item, val) cfg_obj[attr][item] = new_val if not errors: return msg_kwargs = dict(time=datetime.datetime.now(), err_msg=self.__format_err_summary__(errors), cfg_path=self.__config_file__, err_path=self.__err_file__) if kwargs.get("verify") == False: log.warning("IGNORING BELOW CONFIGURATION ERRORS") log.warning(self.__make_error_msg__(errors, False, **kwargs)) self.__write_error_file__(errors, **kwargs) return print(format_multiline(__MSGS__["initial"], **msg_kwargs)) while True: if kwargs.get("exit_on_error") == True: resolve_choice = "2" else: print(format_multiline(__MSGS__["resolve_options"])) resolve_choice = input("-> ") if resolve_choice.strip() == "2": sys.exit(self.__make_error_msg__(errors, **kwargs)) elif resolve_choice.strip() in ["", "1"]: print(format_multiline(__MSGS__['help'])) break while True: cycle_errors(self, errors, self.__config__) errors = self.__verify_config__(self.__config__, **kwargs) if not errors: break self.__save_config__(**kwargs) self.__remove_ignore__(**kwargs)
1,138,871
Applies any new requirements args: requirements: dictionary of attribute requirements kwargs: remove_reqs: list of requirement names to remove
def __set_cfg_reqs__(self, requirements=None, **kwargs): if requirements: self.__cfg_reqs__.update(requirements) for attr in kwargs.get('remove_reqs', []): try: del self.__cfg_reqs__[attr] except KeyError: pass
1,138,877
Formats the error dictionary for printing args: errors: the error dictionary indent: the indent level in number of spaces
def __format_err_summary__(self, errors, indent=0, initial=True): ind_interval = 5 parts = [] ind = ''.ljust(indent, ' ') curr_err = copy.deepcopy(errors) msg_str = "{indent}{attr}: {val}{msg}" good_dict = {} if errors.get("__error_keys__"): line = colors.hd(''.ljust(50, '-')) parts.append(colors.hd("{}index number: {}".format(ind, errors.get("__list_idx__")))) parts.append("{}{}".format(ind, line)) curr_err = {key: curr_err[key] for key in errors['__error_keys__']} indent += ind_interval ind = ''.ljust(indent, ' ') good_dict = {key: value for key, value in errors.items() if key not in errors['__error_keys__'] and not key.startswith("__")} for attr, value in curr_err.items(): msg = '' val = '' if not value.get('items'): val = "[{}] error: ".format( colors.lcyan(value.get("value", "None"))) msg = colors.warning(value.get("msg", value.get("reason"))) parts.append(msg_str.format(indent=ind, attr=colors.fail(attr), val=val, msg=msg)) if value.get('items'): if isinstance(value['items'], list): for item in value['items']: parts += self.__format_err_summary__(item, indent + ind_interval, False) elif isinstance(value['items'], dict): sub_ind = ''.ljust(indent + ind_interval, ' ') for key, value in value['items'].items(): val = "[{}] error: ".format( colors.lcyan(value.get("value", "None"))) msg = colors.warning(value.get("msg", value.get("reason"))) parts.append(msg_str.format(indent=sub_ind, val=val, attr=colors.fail(key), msg=msg)) for attr, value in good_dict.items(): parts.append(msg_str.format(indent=ind, val=colors.blue(value), msg="", attr=colors.blue(attr))) if initial: return "\n".join(parts) else: return parts
1,138,884
Formats the current configuration for saving to file args: obj: the config object initial: bool argument for recursive call catching kwargs: indent: the indent level in number of spaces
def __format_save_config__(self, obj, attr_reqs, initial=True, **kwargs): ind_interval = 5 ind = ''.ljust(kwargs.get('indent', 0), ' ') ind2 = ind + ''.ljust(ind_interval, ' ') parts = [] curr_obj = copy.deepcopy(obj) # comment_kwargs = copy.deepcopy(kwargs) # comment_kwargs['prepend'] = "# " attr_str = "{cmt}{attr} = {value}" good_dict = {} pp_kwargs = {key: value for key, value in kwargs.items() if key in ['indent', 'depth']} for attr, req in attr_reqs.items(): if req.get("description"): parts.append(format_multiline(req['description'], prepend="## ", max_width=78, **pp_kwargs)) value = obj.get(attr, req.get('standard', req.get('default'))) if isinstance(value, tuple) and value: if value[0] == IgnoreClass: value = value[1] parts.append("#! Ignored errors for this item") if attr in obj: parts.append(attr_str.format(attr=attr, value=pprint.pformat(value, **pp_kwargs), cmt='')) else: parts.append(attr_str.format(attr=attr, value=str(value), cmt='# ')) parts.append("\n#! *** non specified attributes ***\n") for attr, value in obj.items(): if attr not in attr_reqs: parts.append(attr_str.format(attr=attr, value=pprint.pformat(value, **pp_kwargs), cmt='')) return "\n\n".join(parts)
1,138,885
Attempts to populate an item's information with its ID, returns result Note that the item id must already be set or otherwise supplied and that the item must exist in the associated user's inventory. Parameters: usr (User) -- User who has the item itemID (str) -- The item's object ID Returns bool - True if successful, false otherwise Raises parseException
def populate(self, usr = None, itemID = None): if not self.id and not itemID: return False if not self.usr and not usr: return False # Object's ID is used by default over any provided ID if self.id: itemID = self.id # Same with the user if self.usr: usr = self.usr pg = usr.getPage("http://www.neopets.com/iteminfo.phtml?obj_id=" + str(itemID), vars = {'Referer': 'http://www.neopets.com/objects.phtml?type=inventory'}) # Verify valid ID if "not in your inventory" in pg.content: logging.getLogger("neolib.item").exception("Invalid ID given, could not populate. ID: " + itemID) return False try: self.img = pg.table.img['src'] self.name = pg.table.find_all("td")[1].text.split(" : ")[1].replace("Owner", "") self.desc = pg.find_all("div", align="center")[1].i.text stats = pg.table.next_sibling.table.find_all("td") self.type = stats[1].text self.weight = stats[3].text self.rarity = stats[5].text self.estVal = stats[7].text except Exception: logging.getLogger("neolib.item").exception("Failed to parse item details.", {'pg': pg}) raise parseException return True
1,139,177
Transfers an item from a user's inventory to another inventory, returns result Parameters: loc (str) -- Location to send the item to (see Item.SHOP, Item.SDB, etc.) usr (User) -- User who has the item Returns bool - True if successful, false otherwise
def sendTo(self, loc, usr=None): if not loc in self._messages: return False if not self.usr and not usr: return False if self.usr: usr = self.usr # Request the item page first to look human pg = usr.getPage("http://www.neopets.com/iteminfo.phtml?obj_id=" + str(self.id)) form = pg.form(name="item_form") form['action'] = loc pg = form.submit() return self._messages[loc] in pg.content
1,139,178
Upgrade the package if there is a later version available. Args: restart: restart app if True dependencies: update package dependencies if True (see pip --no-deps) prerelease: update to pre-release and development versions
def smartupgrade(self, restart=True, dependencies=False, prerelease=False): if not self.check(): if self.verbose: print("Package {} already up-to-date!".format(self.pkg)) return if self.verbose: print("Upgrading {} ...".format(self.pkg)) self.upgrade(dependencies, prerelease, force=False) if restart: self.restart()
1,139,308
Upgrade the package unconditionally Args: dependencies: update package dependencies if True (see pip --no-deps) prerelease: update to pre-release and development versions force: reinstall all packages even if they are already up-to-date Returns True if pip was successful
def upgrade(self, dependencies=False, prerelease=False, force=False): pip_args = ['install', self.pkg] found = self._get_current() != (-1) if found: pip_args.append("--upgrade") if force: pip_args.append( "--force-reinstall" if found else "--ignore-installed") if not dependencies: pip_args.append("--no-deps") if prerelease: pip_args.append("--pre") proxy = os.environ.get('http_proxy') if proxy: pip_args.extend(['--proxy', proxy]) if self.__index: pip_args.extend(['-i', self.index]) try: ecode = pip.main(args=pip_args) except TypeError: # pip changed in 0.6.0 from initial_args to args; this is for backwards compatibility and can be removed when pip 0.5 is no longer in use at all (approx. year 2025) ecode = pip.main(initial_args=pip_args) if ecode != 0: raise PIPError(ecode)
1,139,309
Establish a marquise context for the provided namespace, getting spool filenames. Arguments: namespace -- must be lowercase alphanumeric ([a-z0-9]+). debug -- if debug is True, debugging output will be printed.
def __init__(self, namespace, debug=False): self.debug_enabled = debug self.namespace_c = cstring(namespace) self.marquise_ctx = MARQUISE_INIT(self.namespace_c) if is_cnull(self.marquise_ctx): if FFI.errno == errno.EINVAL: raise ValueError("Invalid namespace: %s" % namespace) raise RuntimeError("Something went wrong, got NULL instead of a marquise_ctx. build_spool_path() failed, or malloc failed. errno is %d" % FFI.errno) self.spool_path_points = cprint(self.marquise_ctx.spool_path_points) self.spool_path_contents = cprint(self.marquise_ctx.spool_path_contents)
1,140,294
Queue an extended datapoint (i.e. a string), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- string value being stored.
def send_extended(self, address, timestamp, value): if self.marquise_ctx is None: raise ValueError("Attempted to write to a closed Marquise handle.") self.__debug("Supplied address: %s" % address) if value is None: raise TypeError("Can't store None as a value.") value = str(value) if timestamp is None: timestamp = self.current_timestamp() # Use cast() here to make up the C datatypes for dispatch. # FFI will take care of converting them to the right endianness. I think. c_address = FFI.cast("uint64_t", address) c_timestamp = FFI.cast("uint64_t", timestamp) # c_value needs to be a byte array with a length in bytes c_value = cstring(value) c_length = FFI.cast("size_t", len_cstring(value)) self.__debug("Sending extended value '%s' with length of %d" % (value, c_length)) success = MARQUISE_SEND_EXTENDED(self.marquise_ctx, c_address, c_timestamp, c_value, c_length) if success != 0: self.__debug("send_extended returned %d, raising exception" % success) raise RuntimeError("send_extended was unsuccessful, errno is %d" % FFI.errno) self.__debug("send_extended returned %d" % success) return True
1,140,297
Pack the `metadata_dict` for an `address` into a data structure and ship it to the spool file. Arguments: address -- the address for which this metadata_dict applies. metadata_dict -- a Python dict of arbitrary string key-value pairs.
def update_source(self, address, metadata_dict): if self.marquise_ctx is None: raise ValueError("Attempted to write to a closed Marquise handle.") self.__debug("Supplied address: %s" % address) # Sanity check the input, everything must be UTF8 strings (not # yet confirmed), no Nonetypes or anything stupid like that. # # The keys of the key-value pairs are unique, by virtue of # taking a dict as input. if any([ x is None for x in metadata_dict.keys() ]): raise TypeError("One of your metadata_dict keys is a Nonetype") # Values are allowed to be None, coerce to empty strings. metadata_dict = dict([ (x[0],"" if x[1] is None else x[1]) for x in metadata_dict.items() ]) # Cast each string to a C-string. This may have unusual results if your # keys/vals aren't particularly stringy, such as Python classes, # Exceptions, etc. They will get str()'d, and they may look stupid. # pylint: disable=multiple-statements try: c_fields = [ cstring(str(x)) for x in metadata_dict.keys() ] except Exception as exc: raise TypeError("One of your metadata_dict keys couldn't be cast to a Cstring, %s" % exc) try: c_values = [ cstring(str(x)) for x in metadata_dict.values() ] except Exception as exc: raise TypeError("One of your metadata_dict values couldn't be cast to a Cstring, %s" % exc) # pylint: enable=multiple-statements # Get our source_dict data structure source_dict = MARQUISE_NEW_SOURCE(c_fields, c_values, len(metadata_dict)) if is_cnull(source_dict): raise ValueError("errno is set to EINVAL on invalid input, our errno is %d" % FFI.errno) # If you do something stupid, like passing a string where an # int (address) is meant to go, CFFI will explode. Which is # fine, but that causes memory leaks. The explosion still # occurs, but we cleanup after (before?) ourselves. try: success = MARQUISE_UPDATE_SOURCE(self.marquise_ctx, address, source_dict) except TypeError as exc: MARQUISE_FREE_SOURCE(source_dict) raise self.__debug("marquise_update_source returned %d" % success) if success != 0: MARQUISE_FREE_SOURCE(source_dict) raise RuntimeError("marquise_update_source was unsuccessful, errno is %d" % FFI.errno) MARQUISE_FREE_SOURCE(source_dict) return True
1,140,298
Constructor. Args: project_key (str): Project key which is used for the root of DB. conf_path (str): Path to the client zeo configuration file. Default :attr:`.settings.ZEO_CLIENT_PATH`.
def __init__(self, project_key, conf_path=settings.ZEO_CLIENT_PATH): super(self.__class__, self).__init__( conf_path=conf_path, project_key=project_key )
1,140,300
Get key from the :attr:`zeo` database root. If the key doesn't exist, create it by calling the `new_type` argument. Args: key (str): Key in the root dict. new_type (func/obj): Object/function returning the new instance. Returns: obj: Stored object, or a new `new_type` instance.
def _zeo_key(self, key, new_type=OOBTree): zeo_key = self.zeo.get(key, None) if zeo_key is None: zeo_key = new_type() self.zeo[key] = zeo_key return zeo_key
1,140,301
Return list of database dictionaries, which are used as indexes for each attribute. Args: cached (bool, default True): Use cached connection to database. Returns: list: List of OOBTree's for each item in :attr:`.COMMON_FIELDS`.
def _get_db_fields(self, obj): for field in obj.indexes: yield field, self._zeo_key(field)
1,140,302
Make sure that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When the `pub` is not an instance of `obj_type`.
def _check_obj_properties(self, pub, name="pub"): if not hasattr(pub, "indexes"): raise InvalidType("`%s` doesn't have .indexes property!" % name) if not pub.indexes: raise InvalidType("`%s.indexes` is not set!" % name) if not hasattr(pub, "project_key"): raise InvalidType( "`%s` doesn't have .project_key property!" % name ) if not pub.project_key: raise InvalidType("`%s.project_key` is not set!" % name)
1,140,303
Yield publications, at indexes defined by `query` property values. Args: query (obj): Object implementing proper interface. Yields: list: List of matching publications.
def _get_subset_matches(self, query): for field_name, db_index in self._get_db_fields(query): attr = getattr(query, field_name) if attr is None: # don't use unset attributes continue results = db_index.get(attr, OOTreeSet()) if results: yield results
1,140,306
Helper function to normalize version. Returns a comparable object. Args: version (str) version, e.g. "0.1.0"
def normalize_version(version): rv = [] for x in version.split("."): try: rv.append(int(x)) except ValueError: for y in re.split("([0-9]+)", x): if y == '': continue try: rv.append(int(y)) except ValueError: rv.append(y) return rv
1,140,344
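Because digit runs are split out and converted, versions compare as mixed int/str lists; for example (using normalize_version above, which needs import re):

print(normalize_version("0.1.0"))    # [0, 1, 0]
print(normalize_version("1.10rc2"))  # [1, 10, 'rc', 2]
print(normalize_version("2.0") < normalize_version("2.0.1"))  # True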
calculates a multipart upload etag in the same way as amazon s3 args: source_path -- The file to calculate the etag for chunk_size -- The chunk size to calculate for. expected -- optional If passed a string, the string will be compared to the resulting etag and raise an exception if they don't match
def calculate_etag(self, expected=None): md5s = [] with open(self.filepath, 'rb') as fp: while True: data = fp.read(CHUNK_SIZE) if not data: break md5s.append(hashlib.md5(data)) digests = b"".join(m.digest() for m in md5s) new_md5 = hashlib.md5(digests) new_etag = '%s-%s' % (new_md5.hexdigest(),len(md5s)) self.etag = new_etag return new_etag
1,140,352
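A standalone sketch of the same multipart-ETag arithmetic with the chunk size as an explicit parameter (the method above reads a module-level CHUNK_SIZE; 8 MiB below is an assumption, matching a common S3 part size):

import hashlib

def multipart_etag(filepath, chunk_size=8 * 1024 * 1024):
    md5s = []
    with open(filepath, 'rb') as fp:
        for data in iter(lambda: fp.read(chunk_size), b''):
            md5s.append(hashlib.md5(data))
    joined = b''.join(m.digest() for m in md5s)      # md5 of concatenated part digests
    return '%s-%d' % (hashlib.md5(joined).hexdigest(), len(md5s))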
Moves the pen forward a few steps in the direction that its "turtle" is facing. Arguments: num_steps - a number like 20. A bigger number makes the pen move farther.
def move_forward(num_steps): assert int(num_steps) == num_steps, "move_forward() only accepts integers, but you gave it " + str(num_steps) _make_cnc_request("move.forward./" + str(num_steps)) state['turtle'].forward(num_steps)
1,140,405
Initializes a Pool of proto buffs. The descriptor_db argument to the constructor is provided to allow specialized file descriptor proto lookup code to be triggered on demand. An example would be an implementation which will read and compile a file specified in a call to FindFileByName() and not require the call to Add() at all. Results from this database will be cached internally here as well. Args: descriptor_db: A secondary source of file descriptors.
def __init__(self, descriptor_db=None): self._internal_db = descriptor_database.DescriptorDatabase() self._descriptor_db = descriptor_db self._descriptors = {} self._enum_descriptors = {} self._file_descriptors = {}
1,140,419
Adds an EnumDescriptor to the pool. This method also registers the FileDescriptor associated with the message. Args: enum_desc: An EnumDescriptor.
def AddEnumDescriptor(self, enum_desc): if not isinstance(enum_desc, descriptor.EnumDescriptor): raise TypeError('Expected instance of descriptor.EnumDescriptor.') self._enum_descriptors[enum_desc.full_name] = enum_desc self.AddFileDescriptor(enum_desc.file)
1,140,420
Gets the FileDescriptor for the file containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file can not be found in the pool.
def FindFileContainingSymbol(self, symbol): symbol = _NormalizeFullyQualifiedName(symbol) try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: file_proto = self._internal_db.FindFileContainingSymbol(symbol) except KeyError as error: if self._descriptor_db: file_proto = self._descriptor_db.FindFileContainingSymbol(symbol) else: raise error if not file_proto: raise KeyError('Cannot find a file containing %s' % symbol) return self._ConvertFileProtoToFileDescriptor(file_proto)
1,140,421
Loads the named descriptor from the pool. Args: full_name: The full name of the descriptor to load. Returns: The descriptor for the named type.
def FindMessageTypeByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._descriptors[full_name]
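For orientation, the stock google.protobuf package exposes the same lookup on its default pool; this sketch assumes protobuf is installed and uses a well-known type that registers itself on import:

from google.protobuf import descriptor_pool
import google.protobuf.timestamp_pb2  # noqa: F401 -- registers Timestamp in the default pool

pool = descriptor_pool.Default()
desc = pool.FindMessageTypeByName('google.protobuf.Timestamp')
print(desc.full_name)  # -> google.protobuf.Timestamp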
1,140,422
Loads the named enum descriptor from the pool. Args: full_name: The full name of the enum descriptor to load. Returns: The enum descriptor for the named type.
def FindEnumTypeByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    if full_name not in self._enum_descriptors:
        self.FindFileContainingSymbol(full_name)
    return self._enum_descriptors[full_name]
1,140,423
Loads the named extension descriptor from the pool. Args: full_name: The full name of the extension descriptor to load. Returns: A FieldDescriptor, describing the named extension.
def FindExtensionByName(self, full_name):
    full_name = _NormalizeFullyQualifiedName(full_name)
    message_name, _, extension_name = full_name.rpartition('.')
    try:
        # Most extensions are nested inside a message.
        scope = self.FindMessageTypeByName(message_name)
    except KeyError:
        # Some extensions are defined at file scope.
        scope = self.FindFileContainingSymbol(full_name)
    return scope.extensions_by_name[extension_name]
1,140,424
Make a protobuf EnumDescriptor given an EnumDescriptorProto protobuf. Args: enum_proto: The descriptor_pb2.EnumDescriptorProto protobuf message. package: Optional package name for the new EnumDescriptor. file_desc: The file containing the enum descriptor. containing_type: The type containing this enum. scope: Scope containing available types. Returns: The added descriptor.
def _ConvertEnumDescriptor(self, enum_proto, package=None, file_desc=None,
                           containing_type=None, scope=None):
    if package:
        enum_name = '.'.join((package, enum_proto.name))
    else:
        enum_name = enum_proto.name
    if file_desc is None:
        file_name = None
    else:
        file_name = file_desc.name
    values = [self._MakeEnumValueDescriptor(value, index)
              for index, value in enumerate(enum_proto.value)]
    desc = descriptor.EnumDescriptor(name=enum_proto.name,
                                     full_name=enum_name,
                                     filename=file_name,
                                     file=file_desc,
                                     values=values,
                                     containing_type=containing_type,
                                     options=enum_proto.options)
    scope['.%s' % enum_name] = desc
    self._enum_descriptors[enum_name] = desc
    return desc
1,140,425
Convert part of non-nested XML to :py:class:`dict`. Args: dom (HTMLElement tree): pre-parsed XML (see dhtmlparser). Returns: dict: with python data
def _alephResultToDict(dom):
    result = {}
    for i in dom.childs:
        if not i.isOpeningTag():
            continue
        keyword = i.getTagName().strip()
        value = _tryConvertToInt(i.getContent().strip())
        # if there are multiple tags with same keyword, add values into
        # array, instead of rewriting existing value at given keyword
        if keyword in result:  # if it is already there ..
            if isinstance(result[keyword], list):  # and it is list ..
                result[keyword].append(value)  # add it to list
            else:  # or make it array
                result[keyword] = [result[keyword], value]
        else:  # if it is not in result, add it
            result[keyword] = value
    return result
1,140,515
Download MARC XML document with given `doc_id` from given `library`. Args: doc_id (DocumentID): You will get this from :func:`getDocumentIDs`. library (str): "``NKC01``" in our case, but don't worry, :func:`getDocumentIDs` adds library specification into :class:`DocumentID` named tuple. Returns: str: MARC XML unicode string. Raises: LibraryNotFoundException DocumentNotFoundException
def downloadMARCXML(doc_id, library, base="nkc"):
    downer = Downloader()
    data = downer.download(
        ALEPH_URL + Template(DOC_URL_TEMPLATE).substitute(
            DOC_ID=doc_id,
            LIBRARY=library
        )
    )
    dom = dhtmlparser.parseString(data)
    # check if there are any errors
    # bad library error
    error = dom.find("login")
    if error:
        error_msg = error[0].find("error")
        if error_msg:
            raise LibraryNotFoundException(
                "Can't download document doc_id: '" + str(doc_id) + "' " +
                "(probably bad library: '" + library + "')!\nMessage: " +
                "\n".join(map(lambda x: x.getContent(), error_msg))
            )
    # another error - document not found
    error = dom.find("ill-get-doc")
    if error:
        error_msg = error[0].find("error")
        if error_msg:
            raise DocumentNotFoundException(
                "\n".join(map(lambda x: x.getContent(), error_msg))
            )
    return data
1,140,519
Retrieves all the FASTQ files in project self.dx_project_name as DXFile objects. Args: barcode: `str`. If set, then only FASTQ file properties for FASTQ files having the specified barcode are returned. Returns: `list` of DXFile objects representing FASTQ files. Raises: `dnanexus_utils.FastqNotFound`: No FASTQ files were found.
def get_fastq_dxfile_objects(self, barcode=None):
    fq_ext_glob = "*{}".format(self.FQEXT)
    name = fq_ext_glob
    if barcode:
        name = "*_{barcode}_*{FQEXT}".format(barcode=barcode, FQEXT=self.FQEXT)
    # find_data_objects returns a generator, which is always truthy;
    # materialize it so the emptiness checks below actually work
    fastqs = list(dxpy.find_data_objects(project=self.dx_project_id,
                                         folder=self.DX_FASTQ_FOLDER,
                                         name=name, name_mode="glob"))
    if not fastqs:
        # Then look for them in all folders:
        fastqs = list(dxpy.find_data_objects(project=self.dx_project_id,
                                             name=name, name_mode="glob"))
    if not fastqs:
        # the original referenced an undefined 'proj_name' here; use the
        # project name stored on the instance instead
        msg = "No FASTQ files found for run {run} ".format(run=self.dx_project_name)
        if barcode:
            msg += "and barcode {barcode}.".format(barcode=barcode)
        raise FastqNotFound(msg)
    fastqs = [dxpy.DXFile(project=x["project"], dxid=x["id"]) for x in fastqs]
    return fastqs
1,140,603
Parse <doc_number> tag from `xml`. Args: xml (str): XML string returned from :func:`aleph.aleph.downloadRecords` Returns: str: Doc ID as string or "-1" if not found.
def getDocNumber(xml):
    dom = dhtmlparser.parseString(xml)
    doc_number_tag = dom.find("doc_number")
    if not doc_number_tag:
        return "-1"
    return doc_number_tag[0].getContent().strip()
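A small usage sketch with a hand-written response fragment (hypothetical doc number; requires the dhtmlparser package):

xml = "<find><doc_number>000973209</doc_number></find>"
print(getDocNumber(xml))              # -> 000973209
print(getDocNumber("<find></find>"))  # -> -1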
1,140,661
Initialize a new Vcs object for a repository located at `path`. If `path` is `None`, then `get_working_directory` is used to identify the path. Args: path (str) - optional. The path to the repo working directory.
def __init__(self, path=None):
    self.path = None
    if path is None:
        self.path = self.get_working_directory()
    else:
        self.path = path
    assert self.exists()
1,140,742
Runs a prompt to change the state of books. Registers `Event`s with `mgr` as they are requested. Args: mgr: A `KindleProgressMgr` object with the `books` and `progress` fields populated.
def _change_state_prompt(mgr):
    cmd = ''
    book_range = range(1, len(mgr.books) + 1)
    ind_to_book = dict(zip(book_range, mgr.books))
    get_book = lambda cmd_str: ind_to_book[int(cmd_str.split()[1])]
    while cmd != 'q':
        print 'Books:'
        for i in book_range:
            print '\t%d: %s' % (i, ind_to_book[i])
        print 'Commands:'
        print '| start {#}  | Start reading book with index {#}'
        print '| finish {#} | Finish reading book with index {#}'
        print '| q          | Quit'
        cmd = safe_raw_input('> ')
        if cmd is None or cmd == 'q':
            break
        elif cmd.startswith('start '):
            book = get_book(cmd)
            initial_progress = mgr.progress[book.asin].locs[1]
            event = SetReadingEvent(book.asin, initial_progress)
        elif cmd.startswith('finish '):
            event = SetFinishedEvent(get_book(cmd).asin)
        else:
            print 'Invalid command'
            event = None
        if event is not None:
            print
            print 'REGISTERED EVENT:'
            print '  ' + str(event)
            # pass a real one-element tuple; the original '(event)' was
            # just 'event' wrapped in redundant parentheses
            mgr.register_events((event,))
            print
1,140,785
Add an argument parser attribute `parser` to the decorated function. Args: func: the function for which we want to create an argument parser
def __call__(self, func):
    if not hasattr(func, "parser"):
        _LOG.debug("Creating parser for '%s'%s", func.__name__,
                   "/%s" % self._name if self._name else "")
        (func_args, _, _, defaults) = getargspec(func)
        self._types, func_args = _check_types(func.__name__, self._types,
                                              func_args, defaults)
        args_and_defaults = _get_args_and_defaults(func_args, defaults)
        parser = _get_arg_parser(func, self._types, args_and_defaults,
                                 self._delimiter_chars)
        parser.get_name = lambda: self._name
        func.parser = parser
        func.parser.call = _get_parser_call_method(func)

    @wraps(func)
    def decorated(*args, **kwargs):
        return func(*args, **kwargs)
    return decorated
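A hedged usage sketch of the decorator; the positional type arguments and the args keyword of parser.call are assumptions inferred from the snippet's handling of self._types, not confirmed library documentation:

@create_parser(str, int)
def repeat(text, times=2):
    """Repeat 'text' the given number of 'times'."""
    return text * times

# the decorated function carries a ready-made argparse parser;
# 'call' parses an argv-style list and invokes the function
print(repeat.parser.call(args=["abc", "--times", "3"]))  # -> abcabcabc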
1,140,852
Add all the sub-parsers to the top_level_parser. Args: top_level_parser: the top level parser methods_to_parse: dict of method name pointing to their associated argument parser class_name: name of the decorated class Returns: a dict of registered name of the parser i.e. sub command name pointing to the method real name
def _add_sub_parsers(self, top_level_parser, methods_to_parse, class_name):
    description = "Accessible methods of {}".format(class_name)
    sub_parsers = top_level_parser.add_subparsers(description=description,
                                                  dest="method")
    # Holds the mapping between the name registered for the parser
    # and the method real name. It is useful in the 'inner_call'
    # method to retrieve the real method
    parser_to_method = {}
    for method_name, parser in methods_to_parse.items():
        # We use the name provided in 'create_parser' or the name of the
        # decorated method
        parser_name = parser.get_name() or method_name
        # Make the method name compatible for the argument parsing
        if parser_name.startswith("_"):
            if not self._parse_private:
                # We skip private methods if the caller asked not to
                # parse them
                continue
            # 'Private' methods are exposed without their leading or
            # trailing '_'s. Also works for 'special' methods.
            parser_name = parser_name.strip("_")
        parser_name = parser_name.replace("_", "-")
        parser_to_method[parser_name] = method_name
        sub_parsers.add_parser(parser_name, parents=[parser],
                               add_help=False,
                               description=parser.description)
    return parser_to_method
1,140,855
Creates the complete argument parser for the decorated class. Args: init_parser: argument parser for the __init__ method or None methods_to_parse: dict of method name pointing to their associated argument parser cls: the class we are decorating Returns: The decorated class with an added attribute 'parser'
def _set_class_parser(self, init_parser, methods_to_parse, cls):
    top_level_parents = [init_parser] if init_parser else []
    description = self._description or cls.__doc__
    top_level_parser = argparse.ArgumentParser(description=description,
                                               parents=top_level_parents,
                                               add_help=False,
                                               conflict_handler="resolve")
    top_level_parser.add_argument("-h", "--help", action=FullHelpAction,
                                  help="Display this help message")
    parser_to_method = self._add_sub_parsers(top_level_parser,
                                             methods_to_parse,
                                             cls.__name__)
    # Update the dict with the __init__ method so we can instantiate
    # the decorated class
    if init_parser:
        parser_to_method["__init__"] = "__init__"
    top_level_parser.call = self._get_parser_call_method(parser_to_method)
    cls.parser = top_level_parser
1,140,856
Return the parser special method 'call' that handles sub-command calling. Args: parser_to_method: mapping of the parser registered name to the method it is linked to
def _get_parser_call_method(self, parser_to_method):
    def inner_call(args=None, instance=None):
        parser = self._cls.parser
        namespace = parser.parse_args(_get_args_to_parse(args, sys.argv))
        if instance is None:
            # If the __init__ method is not part of the method to
            # decorate we cannot instantiate the class
            if "__init__" not in parser_to_method:
                raise ParseThisError(("'__init__' method is not decorated. "
                                      "Please provide an instance to "
                                      "'{}.parser.call' or decorate the "
                                      "'__init__' method with "
                                      "'create_parser'")
                                     .format(self._cls.__name__))
            # We instantiate the class from the command line arguments
            instance = _call_method_from_namespace(self._cls, "__init__",
                                                   namespace)
        method_name = parser_to_method[namespace.method]
        return _call_method_from_namespace(instance, method_name, namespace)
    return inner_call
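Putting the three pieces together, driving a decorated class from the command line might look like this (a sketch assuming the class-level decorator is exposed as parse_class and that __init__ is itself decorated so the class can be instantiated from the parsed namespace):

@parse_class()
class Calculator(object):

    @create_parser(int)
    def __init__(self, precision=2):
        self._precision = precision

    @create_parser(float, float)
    def add(self, a, b):
        return round(a + b, self._precision)

# 'add' is dispatched through the sub-parser registered for it
result = Calculator.parser.call(args=["add", "1.25", "2.5"])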
1,140,857
Convert :class:`.MARCXMLRecord` object to :class:`.EPublication` namedtuple. Args: xml (str/MARCXMLRecord): MARC XML which will be converted to EPublication. In case of str, ``<record>`` tag is required. Returns: structure: :class:`.EPublication` namedtuple with data about \ publication.
def from_xml(xml):
    parsed = xml
    if not isinstance(xml, MARCXMLRecord):
        parsed = MARCXMLRecord(str(xml))

    # check whether the document was deleted
    if "DEL" in parsed.datafields:
        raise DocumentNotFoundException("Document was deleted.")

    # convert Persons objects to amqp's Authors namedtuple
    authors = map(
        lambda a: Author(
            (a.name + " " + a.second_name).strip(),
            a.surname,
            a.title
        ),
        parsed.get_authors()
    )

    # i know that this is not PEP8, but you don't want to see it without
    # proper formatting (it looks bad, really bad)
    return EPublication(
        ISBN                = parsed.get_ISBNs(),
        invalid_ISBN        = parsed.get_invalid_ISBNs(),
        id_number           = parsed.controlfields.get("001", None),
        nazev               = parsed.get_name(),
        podnazev            = parsed.get_subname(),
        vazba               = _first_or_blank_string(parsed.get_binding()),
        cena                = parsed.get_price(),
        castDil             = parsed.get_part(),
        nazevCasti          = parsed.get_part_name(),
        nakladatelVydavatel = parsed.get_publisher(),
        datumVydani         = parsed.get_pub_date(),
        poradiVydani        = parsed.get_pub_order(),
        zpracovatelZaznamu  = _first_or_blank_string(parsed["040a"]),
        format              = parsed.get_format(),
        url                 = parsed.get_urls(),
        mistoVydani         = parsed.get_pub_place(),
        ISBNSouboruPublikaci= [],
        autori              = authors,
        originaly           = parsed.get_originals(),
        internal_url        = parsed.get_internal_urls(),
        anotace             = None,  # TODO: read the annotation
    )
1,140,925
Get list of publications from all available sources. Args: return_namedtuples (bool, default True): Convert :class:`.Publication` structures to namedtuples (used in AMQP communication). Returns: list: List of :class:`.Publication` structures converted to namedtuples.
def get_all_publications(return_namedtuples=True):
    sources = [
        ben_cz.get_publications,
        grada_cz.get_publications,
        cpress_cz.get_publications,
        zonerpress_cz.get_publications,
    ]

    # get data from all scrapers
    publications = []
    for source in sources:
        publications.extend(
            filters.filter_publications(source())
        )

    # convert to namedtuples
    if return_namedtuples:
        publications = map(lambda x: x.to_namedtuple(), publications)

    return publications
1,141,029
Create a config file from the provided sections and section contents. Args: sections (List[str]): A list of section keys. section_contents (List[dict]): A list of dictionaries. Must be as long as the list of sections; that is, if there are two sections, there should be two dicts. Returns: configparser.ConfigParser: A ConfigParser. Raises: ValueError
def create_config(sections, section_contents):
    sections_length, section_contents_length = len(sections), len(section_contents)
    if sections_length != section_contents_length:
        raise ValueError("Mismatch between argument lengths.\n"
                         "len(sections) = {}\n"
                         "len(section_contents) = {}"
                         .format(sections_length, section_contents_length))
    config = configparser.ConfigParser()
    for section, section_content in zip(sections, section_contents):
        config[section] = section_content
    return config
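A short usage sketch; the section and option names here are invented for illustration:

sections = ["email", "server"]
section_contents = [
    {"user": "jane.doe", "password": "hunter2"},
    {"host": "smtp.example.com", "port": "587"},
]
config = create_config(sections, section_contents)
print(config["server"]["host"])  # -> smtp.example.com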
1,141,073
Write the config to the output path. Creates the necessary directories if they aren't there. Args: config (configparser.ConfigParser): A ConfigParser.
def write_config(config, config_path=CONFIG_PATH):
    # create the parent directory if needed; the original tested the
    # *file* path, so makedirs could crash on an existing directory
    # when only the file was missing
    config_dir = os.path.dirname(config_path)
    if not os.path.exists(config_dir):
        os.makedirs(config_dir)
    with open(config_path, 'w', encoding='utf-8') as f:
        config.write(f)
1,141,074
Read the config information from the config file. Args: config_path (str): Relative path to the email config file. Returns: defaultdict: A defaultdict with the config information. Raises: IOError
def read_config(config_path=CONFIG_PATH):
    if not os.path.isfile(config_path):
        raise IOError("No config file found at %s" % config_path)
    config_parser = configparser.ConfigParser()
    config_parser.read(config_path)
    config = _config_parser_to_defaultdict(config_parser)
    return config
1,141,075
Check that all sections of the config contain the keys that they should. Args: config (defaultdict): A defaultdict. Raises: ConfigurationError
def check_config(config):
    for section, expected_section_keys in SECTION_KEYS.items():
        section_content = config.get(section)
        if not section_content:
            raise ConfigurationError("Config file badly formed! Section {} is missing."
                                     .format(section))
        elif not _section_is_healthy(section_content, expected_section_keys):
            raise ConfigurationError("The {} section of the configuration file is badly formed!"
                                     .format(section))
1,141,076
Run diagnostics on the configuration file. Args: config_path (str): Path to the configuration file. Returns: str, Set[str], dict(str, Set[str]): The path to the configuration file, a set of missing sections and a dict that maps each section to the entries that have either missing or empty options.
def run_config_diagnostics(config_path=CONFIG_PATH):
    config = read_config(config_path)
    missing_sections = set()
    malformed_entries = defaultdict(set)
    for section, expected_section_keys in SECTION_KEYS.items():
        section_content = config.get(section)
        if not section_content:
            missing_sections.add(section)
        else:
            for option in expected_section_keys:
                option_value = section_content.get(option)
                if not option_value:
                    malformed_entries[section].add(option)
    return config_path, missing_sections, malformed_entries
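A possible way to surface the diagnostics to a user (hypothetical report formatting; SECTION_KEYS and CONFIG_PATH come from the surrounding module):

path, missing_sections, malformed_entries = run_config_diagnostics()
print("Diagnostics for {}:".format(path))
for section in missing_sections:
    print("  missing section: [{}]".format(section))
for section, options in malformed_entries.items():
    print("  [{}] has missing or empty options: {}"
          .format(section, ", ".join(sorted(options))))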
1,141,077
Try to parse an attribute of the config file. Args: config (defaultdict): A defaultdict. section (str): The section of the config file to get information from. attribute (str): The attribute of the section to fetch. Returns: str: The string corresponding to the section and attribute. Raises: ConfigurationError
def get_attribute_from_config(config, section, attribute):
    # don't overwrite 'section': the original shadowed the section *name*
    # with its content, garbling the error message below
    section_content = config.get(section)
    if section_content:
        option = section_content.get(attribute)
        if option:
            return option
    raise ConfigurationError("Config file badly formed!\n"
                             "Failed to get attribute '{}' from section '{}'!"
                             .format(attribute, section))
1,141,078
Verify that a valid config file exists. Args: config_path (str): Path to the config file. Returns: boolean: True if there is a valid config file, false if not.
def valid_config_exists(config_path=CONFIG_PATH):
    if os.path.isfile(config_path):
        try:
            config = read_config(config_path)
            check_config(config)
        except (ConfigurationError, IOError):
            return False
    else:
        return False
    return True
1,141,079
Nice output string for the config, which is a nested defaultdict. Args: config (defaultdict(defaultdict)): The configuration information. Returns: str: A human-readable output string detailing the contents of the config.
def config_to_string(config):
    output = []
    for section, section_content in config.items():
        output.append("[{}]".format(section))
        for option, option_value in section_content.items():
            output.append("{} = {}".format(option, option_value))
    return "\n".join(output)
1,141,080
Convert a ConfigParser to a defaultdict. Args: config_parser (ConfigParser): A ConfigParser.
def _config_parser_to_defaultdict(config_parser):
    config = defaultdict(defaultdict)
    for section, section_content in config_parser.items():
        if section != 'DEFAULT':
            for option, option_value in section_content.items():
                config[section][option] = option_value
    return config
1,141,081
Fetch a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of the blob to fetch. Returns: A dict with data about the blob.
def get_blob(profile, sha):
    resource = "/blobs/" + sha
    data = api.get_request(profile, resource)
    return prepare(data)
1,141,095
Create a blob. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. content The (UTF-8 encoded) content to create in the blob. Returns: A dict with data about the newly created blob.
def create_blob(profile, content):
    resource = "/blobs"
    payload = {"content": content}
    data = api.post_request(profile, resource, payload)
    return data
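A hypothetical round trip, assuming a profile built via simplygithub.authentication.profile and that the response dict carries a "sha" key, as GitHub's blob endpoint does:

new_blob = create_blob(profile, "Hello, blob!")
fetched = get_blob(profile, new_blob["sha"])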
1,141,096
Check if the current configuration is valid. Parameters: None. Returns: bool: True iff no problems were found.
def check(self):
    # use a mutable container so the nested helpers can flip the flag;
    # a bare "passed = False" inside them would only create a local
    # variable and never touch the outer flag
    status = {'passed': True}

    def check_type(attr, types):
        # checks whether the parameter has a certain type
        val = getattr(self, attr)
        if not isinstance(val, types):
            logger.error('Parameter "%s" = %s: invalid type ' +
                         '(should be %s).', attr, val, str(types))
            status['passed'] = False

    def check_file_exists(attr):
        # check whether the specified file exists
        path = getattr(self, attr)
        if not os.path.isfile(path):
            logger.error('Parameter "%s" = %s: file does not exist. ',
                         attr, path)
            status['passed'] = False

    def check_dir_exists(attr):
        # check whether the specified directory exists
        path = getattr(self, attr)
        if not os.path.isdir(path):
            logger.error('Parameter "%s" = %s: directory does not exist.',
                         attr, path)
            status['passed'] = False

    def check_dir_writable(attr):
        path = getattr(self, attr)
        if not misc.test_dir_writable(path):
            logger.error('Parameter "%s" = %s: directory is not writable',
                         attr, path)
            status['passed'] = False

    def check_file_writable(attr):
        # check whether the specified file is writable
        path = getattr(self, attr)
        if not misc.test_file_writable(path):
            logger.error('Parameter "%s" = %s: file not writable.',
                         attr, path)
            status['passed'] = False

    def check_range(attr, mn=None, mx=None, left_open=False, right_open=False):
        # checks if a GO-PCA parameter falls within a certain numeric range
        val = getattr(self, attr)
        in_range = True
        rel_op = {True: '<', False: '<='}
        # default the relation strings so the error message below is
        # well-defined even when only one bound is given
        left_rel = ''
        right_rel = ''
        if mn is not None:
            left_rel = '%s %s ' % (str(mn), rel_op[left_open])
            if left_open:
                if not mn < val:
                    in_range = False
            else:
                if not mn <= val:
                    in_range = False
        if mx is not None:
            right_rel = ' %s %s' % (rel_op[right_open], str(mx))
            if right_open:
                if not val < mx:
                    in_range = False
            else:
                if not val <= mx:
                    in_range = False
        if not in_range:
            logger.error('Parameter "%s" = %s: out of range ' +
                         '(should be %s %s %s).',
                         attr, val, left_rel, attr, right_rel)
            status['passed'] = False

    # check if input files are strings
    # specification of gene ontology file is optional
    check_type('data_dir', (str, unicode))
    check_dir_writable('data_dir')
    check_type('job_dir', (str, unicode))
    check_dir_writable('job_dir')
    if self.ssl_dir is not None:
        check_dir_exists('ssl_dir')
        # the original referenced a bare 'ssl_dir' here; the attribute
        # lives on the instance
        if not os.path.isfile(self.ssl_dir + os.sep + 'gopca_server.cert'):
            logger.error('SSL certificate file not found!')
            status['passed'] = False
        if not os.path.isfile(self.ssl_dir + os.sep + 'gopca_server.key'):
            logger.error('SSL key file not found!')
            status['passed'] = False
    check_type('port', int)
    # TO-DO: check remaining parameters
    return status['passed']
1,141,141
Converts parameter names from snake_case to camelCase. Args: name, str. Snake case. strict: bool, default False. If True, will set name to lowercase before converting; otherwise assumes the original name is proper camelCase or snake_case already. Returns: str: camelCase.
def _snake_to_camel(name, strict=False):
    if strict:
        name = name.lower()
    terms = name.split('_')
    return terms[0] + ''.join([term.capitalize() for term in terms[1:]])
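A few illustrative calls (hypothetical names):

print(_snake_to_camel("max_retry_count"))               # -> maxRetryCount
print(_snake_to_camel("MAX_RETRY_COUNT", strict=True))  # -> maxRetryCount
print(_snake_to_camel("alreadyCamel"))                  # -> alreadyCamel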
1,141,800