code (string, lengths 52–7.75k) | docs (string, lengths 1–5.85k) |
---|---|
def inverse(self, N):
if N == 0:
return 0
lm, hm = 1, 0
low, high = N % self.P, self.P
while low > 1:
r = high//low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % self.P | Returns the modular inverse of an integer with respect to the field
characteristic, P.
Use the Extended Euclidean Algorithm:
https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm |
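As a rough standalone illustration of the same extended Euclidean algorithm (the modinv helper and the small prime below are illustrative, not part of this codebase):

def modinv(n, p):
    """Return the modular inverse of n modulo a prime p, via the iterative extended Euclidean algorithm."""
    if n == 0:
        return 0
    lm, hm = 1, 0
    low, high = n % p, p
    while low > 1:
        r = high // low
        lm, low, hm, high = hm - lm * r, high - low * r, lm, low
    return lm % p

# Sanity check on a small prime field: 3 * 6 == 18 == 1 (mod 17)
assert (3 * modinv(3, 17)) % 17 == 1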
def is_on_curve(self, point):
X, Y = point.X, point.Y
return (
pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b
) % self.P == 0 | Checks whether a point is on the curve.
Args:
point (AffinePoint): Point to be checked.
Returns:
bool: True if point is on the curve, False otherwise. |
def generate_private_key(self):
random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')
binary_data = bytes(random_string, 'utf-8')
hash_object = hashlib.sha256(binary_data)
message_digest_bin = hash_object.digest()
message_digest_hex = binascii.hexlify(message_digest_bin)
return message_digest_hex | Generates a private key from random data.
SHA-256 is a member of the SHA-2 cryptographic hash functions designed by
the NSA. SHA stands for Secure Hash Algorithm. The random data is converted
to bytes and hashed with SHA-256. The binary output is converted to a hex
representation.
Args:
data (str): The data to be hashed with SHA-256.
Returns:
bytes: The hexadecimal representation of the hashed binary data. |
def generate_public_key(self):
private_key = int(self.private_key, 16)
if private_key >= self.N:
raise Exception('Invalid private key.')
G = JacobianPoint(self.Gx, self.Gy, 1)
public_key = G * private_key
x_hex = '{0:0{1}x}'.format(public_key.X, 64)
y_hex = '{0:0{1}x}'.format(public_key.Y, 64)
return '04' + x_hex + y_hex | Generates a public key from the hex-encoded private key using elliptic
curve cryptography. The private key is multiplied by a predetermined point
on the elliptic curve called the generator point, G, resulting in the
corresponding public key. The generator point is always the same for all
Bitcoin users.
Jacobian coordinates are used to represent the elliptic curve point G.
https://en.wikibooks.org/wiki/Cryptography/Prime_Curve/Jacobian_Coordinates
The exponentiation-by-squaring (also known as double-and-add) method is
used for the elliptic curve multiplication that results in the public key.
https://en.wikipedia.org/wiki/Exponentiation_by_squaring
Bitcoin public keys are 65 bytes. The first byte is 0x04, next 32
bytes correspond to the X coordinate, and last 32 bytes correspond
to the Y coordinate. They are typically encoded as 130-length hex
characters.
Args:
private_key (bytes): UTF-8 encoded hexadecimal
Returns:
str: The public key in hexadecimal representation. |
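A minimal sketch of the double-and-add loop the docstring refers to, kept generic over the group operations (the scalar_multiply helper and the integer demo are illustrative assumptions, not this library's API):

def scalar_multiply(k, point, double, add, identity):
    # Scan the bits of k from least to most significant (double-and-add).
    result = identity
    addend = point
    while k:
        if k & 1:
            result = add(result, addend)
        addend = double(addend)
        k >>= 1
    return result

# Demonstrated with plain integers, where add is + and double is *2, so the result is simply k * point.
assert scalar_multiply(13, 5, double=lambda p: 2 * p, add=lambda a, b: a + b, identity=0) == 65

With elliptic curve points, add and double would be the curve's point addition and doubling operations, and identity the point at infinity.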
def generate_address(self):
binary_pubkey = binascii.unhexlify(self.public_key)
binary_digest_sha256 = hashlib.sha256(binary_pubkey).digest()
binary_digest_ripemd160 = hashlib.new('ripemd160', binary_digest_sha256).digest()
binary_version_byte = bytes([0])
binary_with_version_key = binary_version_byte + binary_digest_ripemd160
checksum_intermed = hashlib.sha256(binary_with_version_key).digest()
checksum_intermed = hashlib.sha256(checksum_intermed).digest()
checksum = checksum_intermed[:4]
binary_address = binary_with_version_key + checksum
leading_zero_bytes = len(binary_address) - len(binary_address.lstrip(b'\x00'))
inp = binary_address
result = 0
while len(inp) > 0:
result *= 256
result += inp[0]
inp = inp[1:]
result_bytes = bytes()
while result > 0:
curcode = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'[result % 58]
result_bytes = bytes([ord(curcode)]) + result_bytes
result //= 58
pad_size = 0 - len(result_bytes)
padding_element = b'1'
if pad_size > 0:
result_bytes = padding_element * pad_size + result_bytes
result = ''.join([chr(y) for y in result_bytes])
address = '1' * leading_zero_bytes + result
return address | Creates a Bitcoin address from the public key.
Details of the steps for creating the address are outlined in this link:
https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses
The last step is Base58Check encoding, which is similar to Base64 encoding but
uses a smaller alphabet that omits easily confused characters such as '0', 'O',
'I', and 'l', producing a more human-readable string. More on Base58Check encoding here:
https://en.bitcoin.it/wiki/Base58Check_encoding |
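For reference, a compact Base58Check sketch using only the standard library (the base58check_encode helper below is hypothetical, not part of this codebase):

import hashlib

B58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def base58check_encode(version_byte, payload):
    # version + payload + first 4 bytes of double SHA-256 as checksum
    data = bytes([version_byte]) + payload
    checksum = hashlib.sha256(hashlib.sha256(data).digest()).digest()[:4]
    data += checksum
    # Each leading zero byte is encoded as a literal '1'.
    leading_zeros = len(data) - len(data.lstrip(b'\x00'))
    num = int.from_bytes(data, 'big')
    encoded = ''
    while num > 0:
        num, rem = divmod(num, 58)
        encoded = B58_ALPHABET[rem] + encoded
    return '1' * leading_zeros + encoded

# e.g. base58check_encode(0, hash160_digest) would produce a mainnet P2PKH address.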
def double(self):
X1, Y1, Z1 = self.X, self.Y, self.Z
if Y1 == 0:
return POINT_AT_INFINITY
S = (4 * X1 * Y1 ** 2) % self.P
M = (3 * X1 ** 2 + self.a * Z1 ** 4) % self.P
X3 = (M ** 2 - 2 * S) % self.P
Y3 = (M * (S - X3) - 8 * Y1 ** 4) % self.P
Z3 = (2 * Y1 * Z1) % self.P
return JacobianPoint(X3, Y3, Z3) | Doubles this point.
Returns:
JacobianPoint: The point corresponding to `2 * self`. |
def to_affine(self):
X, Y, Z = self.X, self.Y, self.inverse(self.Z)
return AffinePoint((X * Z ** 2) % self.P, (Y * Z ** 3) % self.P) | Converts this point to an affine representation.
Returns:
AffinePoint: The affine representation. |
def double(self):
X1, Y1, a, P = self.X, self.Y, self.a, self.P
if self.infinity:
return self
S = ((3 * X1 ** 2 + a) * self.inverse(2 * Y1)) % P
X2 = (S ** 2 - (2 * X1)) % P
Y2 = (S * (X1 - X2) - Y1) % P
return AffinePoint(X2, Y2) | Doubles this point.
Returns:
AffinePoint: The point corresponding to `2 * self`. |
def slope(self, other):
X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y
Y3 = Y1 - Y2
X3 = X1 - X2
return (Y3 * self.inverse(X3)) % self.P | Determines the slope between this point and another point.
Args:
other (AffinePoint): The second point.
Returns:
int: Slope between self and other. |
def to_jacobian(self):
if not self:
return JacobianPoint(X=0, Y=0, Z=0)
return JacobianPoint(X=self.X, Y=self.Y, Z=1) | Converts this point to a Jacobian representation.
Returns:
JacobianPoint: The Jacobian representation. |
def import_model(self, name, path="floyd.db.models"):
if name in self._model_cache:
return self._model_cache[name]
try:
model = getattr(__import__(path, None, None, [name]), name)
self._model_cache[name] = model
except ImportError:
return False
return model | Imports a model of the given name from path, returning it from the local
model cache if it has been previously loaded, otherwise importing it. |
def parse_md(self):
post_content = _MARKDOWN.convert(self.raw_src)
if hasattr(_MARKDOWN, 'Meta'):
# 'Meta' in _MARKDOWN and _MARKDOWN.Meta:
for key in _MARKDOWN.Meta:
print "\t meta: %s: %s (%s)" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0]))
if key == 'pubdate':
setattr(self, key, datetime.datetime.fromtimestamp(float(_MARKDOWN.Meta[key][0])))
else:
setattr(self, key, _MARKDOWN.Meta[key][0])
self.content = post_content
self.stub = self.__key__
# set required fields
# @TODO required in schema rather than here
if not hasattr(self, 'pubdate'):
print '\t Notice: setting default pubdate'
setattr(self, 'pubdate', datetime.datetime.now()) | Takes a post path and returns a dictionary of variables |
def filter(self, **kwargs):
# @TODO refactor with models as dicts
f_field = kwargs.keys()[0]
f_value = kwargs[f_field]
_newset = []
for m in self._dataset:
if hasattr(m, f_field):
if getattr(m, f_field) == f_value:
_newset.append(m)
self._dataset = _newset
return self | filter results of dataset eg.
Query('Posts').filter(post_type='post') |
def sort_by(self, sb):
self._dataset = sorted(self._dataset, key=lambda x: x.pubdate, reverse=True)
return self | Sort results by pubdate, newest first |
def execute_train_task_with_dependencies(self, task_cls, **kwargs):
log.info("Task {0}".format(get_task_name(task_cls)))
#Instantiate the task
task_inst = task_cls()
#Grab arguments from the task instance and set them
for arg in task_inst.args:
if arg not in kwargs:
kwargs[arg] = task_inst.args[arg]
#Check for dependencies defined by the task
if hasattr(task_inst, "dependencies"):
deps = task_inst.dependencies
dep_results = []
#Run the dependencies through recursion (in case of dependencies of dependencies, etc)
for dep in deps:
log.info("Dependency {0}".format(get_task_name(dep)))
dep_results.append(self.execute_train_task_with_dependencies(dep.cls, **dep.args))
trained_dependencies = []
#Add executed dependency to trained_dependencies list on the task
for i in xrange(0,len(deps)):
dep = deps[i]
dep_result = dep_results[i]
name = dep.name
namespace = dep.namespace
category = dep.category
trained_dependencies.append(TrainedDependency(category=category, namespace=namespace, name = name, inst = dep))
task_inst.trained_dependencies = trained_dependencies
#Finally, run the task
task_inst.train(**kwargs)
return task_inst | Run the training, as well as any dependencies of the training
task_cls - class of a task |
def execute_predict_task(self, task_inst, predict_data, **kwargs):
result = task_inst.predict(predict_data, **task_inst.args)
return result | Do a prediction
task_inst - instance of a task |
def train(self, **kwargs):
log.info("Starting to train...")
if not self.setup_run:
self.setup()
self.trained_tasks = []
for task in self.tasks:
data = self.reformatted_input[task.data_format]['data']
target = self.reformatted_input[task.data_format]['target']
if data is None:
raise Exception("Data cannot be none. Check the config file to make sure the right input is being read.")
kwargs['data']=data
kwargs['target']=target
trained_task = self.execute_train_task_with_dependencies(task, **kwargs)
self.trained_tasks.append(trained_task)
#If the trained task alters the data in any way, pass it down the chain to the next task
if hasattr(trained_task, 'data'):
self.reformatted_input[task.data_format]['data'] = trained_task.data
log.info("Finished training.") | Do the workflow training |
def predict(self, **kwargs):
reformatted_predict = self.reformat_predict_data()
results = {}
for task_inst in self.trained_tasks:
predict = reformatted_predict[task_inst.data_format]['predict']
kwargs['predict']=predict
results.update({get_task_name(task_inst) : self.execute_predict_task(task_inst, predict, **kwargs)})
return results | Do the workflow prediction (done after training, with new data) |
def read_input(self, input_cls, filename, **kwargs):
input_inst = input_cls()
input_inst.read_input(filename)
return input_inst.get_data() | Read in input and do some minimal preformatting
input_cls - the class to use to read the input
filename - input filename |
def reformat_file(self, input_file, input_format, output_format):
#Return none if input_file or input_format do not exist
if input_file is None or input_format is None:
return None
#Find the needed input class and read the input stream
try:
input_cls = self.find_input(input_format)
input_inst = input_cls()
except TypeError:
#Return none if input_cls is a Nonetype
return None
#If the input file cannot be found, return None
try:
input_inst.read_input(self.absolute_filepath(input_file))
except IOError:
return None
formatter = find_needed_formatter(input_format, output_format)
if formatter is None:
raise Exception("Cannot find a formatter that can convert from {0} to {1}".format(self.input_format, output_format))
formatter_inst = formatter()
formatter_inst.read_input(input_inst.get_data(), input_format)
data = formatter_inst.get_data(output_format)
return data | Reformat input data files to a format the tasks can use |
def reformat_input(self, **kwargs):
reformatted_input = {}
needed_formats = []
for task_cls in self.tasks:
needed_formats.append(task_cls.data_format)
self.needed_formats = list(set(needed_formats))
for output_format in self.needed_formats:
reformatted_input.update(
{
output_format :
{
'data' : self.reformat_file(self.input_file, self.input_format, output_format),
'target' : self.reformat_file(self.target_file, self.target_format, output_format)
}
}
)
return reformatted_input | Reformat input data |
def _create_modulename(cdef_sources, source, sys_version):
key = '\x00'.join([sys_version[:3], source, cdef_sources])
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('0').rstrip('L')
return '_xprintidle_cffi_{0}{1}'.format(k1, k2) | This is the same as CFFI's create modulename except we don't include the
CFFI version. |
def server_identity_is_verified(self):
# Encrypt a uuid token for the server
server_verify_token = self.gpg.encrypt(self._nonce0,
self.server_fingerprint, always_trust=True)
if not server_verify_token.ok:
raise GPGAuthStage0Exception(
'Encryption of the nonce0 (%s) '
'to the server fingerprint (%s) failed.' %
(self._nonce0, self.server_fingerprint)
)
server_verify_response = post_server_verify_token(
self,
keyid=self.user_fingerprint,
server_verify_token=str(server_verify_token)
)
if not check_server_verify_response(server_verify_response):
raise GPGAuthStage0Exception("Verify endpoint wrongly formatted")
if server_verify_response.headers.get('X-GPGAuth-Verify-Response') != self._nonce0:
raise GPGAuthStage0Exception(
'The server decrypted something different than what we sent '
'(%s <> %s)' %
(server_verify_response.headers.get('X-GPGAuth-Verify-Response'), self._nonce0))
logger.info('server_identity_is_verified: OK')
return True | GPGAuth stage0 |
def user_auth_token(self):
# stage0 is a prerequisite
if not self.server_identity_is_verified:
return False
server_login_response = post_log_in(
self,
keyid=self.user_fingerprint
)
if not check_server_login_stage1_response(server_login_response):
raise GPGAuthStage1Exception("Login endpoint wrongly formatted")
# Get the encrypted User Auth Token
encrypted_user_auth_token = unquote_plus(
server_login_response.headers.get('X-GPGAuth-User-Auth-Token')
.replace('\\\\', '\\')
).replace('\\ ', ' ')
logger.debug('User token to decrypt: %s', encrypted_user_auth_token)
logger.info('Decrypting the user authentication token; '
'password prompt expected')
passphrase = None
# For the sake of tests, allow one to set the passphrase onto
# the object
if hasattr(self, '_user_passphrase'):
passphrase = self._user_passphrase
user_auth_token = self.gpg.decrypt(encrypted_user_auth_token, always_trust=True, passphrase=passphrase)
if not user_auth_token.ok:
raise GPGAuthStage1Exception("Auth token decryption failed: %s", user_auth_token.status)
logger.info('user_auth_token: %s', user_auth_token)
return str(user_auth_token) | GPGAuth Stage1 |
def is_authenticated_with_token(self):
""" Send back the token to the server to get auth cookie """
server_login_response = post_log_in(
self,
keyid=self.user_fingerprint,
user_token_result=self.user_auth_token
)
if not check_server_login_stage2_response(server_login_response):
raise GPGAuthStage2Exception("Login endpoint wrongly formatted")
self.cookies.save(ignore_discard=True)
logger.info('is_authenticated_with_token: OK')
return True | GPGAuth Stage 2 |
def publish(self, message, message_type, topic=''):
if message_type == MULTIPART:
raise Exception("Unsupported request type")
super(Publisher, self).send(message, message_type, topic) | Publish the message on the PUB socket with the given topic name.
Args:
- message: the message to publish
- message_type: the type of message being sent
- topic: the topic on which to send the message. Defaults to ''. |
def load(self, cls, run_id):
id_code = self.generate_load_identifier(cls, run_id)
inst = self.store.load(id_code)
return inst | Load a workflow
cls - workflow class (to get __name__ from)
run_id - id given to the specific run |
def save(self, obj, run_id):
id_code = self.generate_save_identifier(obj, run_id)
self.store.save(obj, id_code) | Save a workflow
obj - instance of a workflow to save
run_id - unique id to give the run |
def setup_tasks(self, tasks):
task_classes = []
for task in tasks:
category, namespace, name = task.split(".")
try:
cls = find_in_registry(category=category, namespace=namespace, name=name)[0]
except TypeError:
log.error("Could not find the task with category.namespace.name {0}".format(task))
raise TypeError
task_classes.append(cls)
self.tasks = task_classes | Find task classes from category.namespace.name strings
tasks - list of strings |
def initialize_workflow(self, workflow):
self.workflow = workflow()
self.workflow.tasks = self.tasks
self.workflow.input_file = self.input_file
self.workflow.input_format = self.input_format
self.workflow.target_file = self.target_file
self.workflow.target_format = self.target_format
self.workflow.run_id = self.run_id
self.workflow.setup() | Create a workflow
workflow - a workflow class |
def reformat_filepath(self, config_file, filename):
if not filename.startswith("/"):
filename = self.config_file_format.format(config_file, filename)
return filename | Convert relative paths in config file to absolute |
def item_lister(command, _connection, page_size, page_number, sort_by,
sort_order, item_class, result_set, **kwargs):
# pylint: disable=R0913
page = page_number
while True:
item_collection = _connection.get_list(command,
page_size=page_size,
page_number=page,
sort_by=sort_by,
sort_order=sort_order,
item_class=item_class,
**kwargs)
result_set.total_count = item_collection.total_count
result_set.page_number = page
for item in item_collection.items:
yield item
if item_collection.total_count < 0 or item_collection.page_size == 0:
break
if len(item_collection.items) > 0:
page += 1
else:
break | A generator function for listing Video and Playlist objects. |
def get_manifest(self, asset_xml):
# pylint: disable=E1101
manifest = '<?xml version="1.0" encoding="utf-8"?>'
manifest += '<publisher-upload-manifest publisher-id="%s" ' % \
self.publisher_id
manifest += 'preparer="%s" ' % self.preparer
if self.report_success:
manifest += 'report-success="TRUE">\n'
for notify in self.notifications:
manifest += '<notify email="%s"/>' % notify
if self.callback:
manifest += '<callback entity-url="%s"/>' % self.callback
manifest += asset_xml
manifest += '</publisher-upload-manifest>'
return manifest | Construct and return the xml manifest to deliver along with video file. |
def _send_file(self, filename):
# pylint: disable=E1101
ftp = ftplib.FTP(host=self.host)
ftp.login(user=self.user, passwd=self.password)
ftp.set_pasv(True)
ftp.storbinary("STOR %s" % os.path.basename(filename),
file(filename, 'rb')) | Sends a file via FTP. |
def _post(self, data, file_to_upload=None):
# pylint: disable=E1101
params = {"JSONRPC": simplejson.dumps(data)}
req = None
if file_to_upload:
req = http_core.HttpRequest(self.write_url)
req.method = 'POST'
req.add_body_part("JSONRPC", simplejson.dumps(data), 'text/plain')
upload = file(file_to_upload, "rb")
req.add_body_part("filePath", upload, 'application/octet-stream')
req.end_of_parts()
content_type = "multipart/form-data; boundary=%s" % \
http_core.MIME_BOUNDARY
req.headers['Content-Type'] = content_type
req.headers['User-Agent'] = config.USER_AGENT
req = http_core.ProxiedHttpClient().request(req)
else:
msg = urllib.urlencode({'json': params['JSONRPC']})
req = urllib2.urlopen(self.write_url, msg)
if req:
result = simplejson.loads(req.read())
if 'error' in result and result['error']:
exceptions.BrightcoveError.raise_exception(
result['error'])
return result['result'] | Make the POST request. |
def _get_response(self, **kwargs):
# pylint: disable=E1101
url = self.read_url + "?output=JSON&token=%s" % self.read_token
for key in kwargs:
if key and kwargs[key]:
val = kwargs[key]
if isinstance(val, (list, tuple)):
val = ",".join(val)
url += "&%s=%s" % (key, val)
self._api_url = url
req = urllib2.urlopen(url)
data = simplejson.loads(req.read())
self._api_raw_data = data
if data and data.get('error', None):
exceptions.BrightcoveError.raise_exception(
data['error'])
if data is None:
raise exceptions.NoDataFoundError(
"No data found for %s" % repr(kwargs))
return data | Make the GET request. |
def get_list(self, command, item_class, page_size, page_number, sort_by,
sort_order, **kwargs):
# pylint: disable=R0913,W0221
data = self._get_response(command=command,
page_size=page_size,
page_number=page_number,
sort_by=sort_by,
sort_order=sort_order,
video_fields=None,
get_item_count="true",
**kwargs)
return ItemCollection(data=data,
item_class=item_class,
_connection=self) | Not intended to be called directly, but rather called by the
ItemResultSet object iterator. |
def initialize_renderer(extensions=None):
if extensions is None:
extensions = []
if isinstance(extensions, str):
extensions = [extension.strip() for extension in extensions.split(',')]
for extension in getattr(settings, 'MARKYMARK_EXTENSIONS', DEFAULT_MARKYMARK_EXTENSIONS):
extensions.append(extension)
return markdown.Markdown(extensions=extensions) | Initializes the renderer by setting up the extensions (taking a comma separated
string or iterable of extensions). These extensions are added alongside the
configured always-on extensions.
Returns a markdown renderer instance. |
def setup_formats(self):
methods = self.get_methods()
for m in methods:
#Methods named "from_X" will be assumed to convert from format X to the common format
if m.startswith("from_"):
self.input_formats.append(re.sub("from_" , "",m))
#Methods named "to_X" will be assumed to convert from the common format to X
elif m.startswith("to_"):
self.output_formats.append(re.sub("to_","",m)) | Inspects its methods to see what it can convert from and to |
def read_input(self, input_data, data_format):
if data_format not in self.input_formats:
raise Exception("Input format {0} not available with this class. Available formats are {1}.".format(data_format, self.input_formats))
data_converter = getattr(self, "from_" + data_format)
self.data = data_converter(input_data) | Reads the input data and converts to common format
input_data - the output from one of the input classes (ie CSVInput)
data_format - the format of the data. See utils.input.dataformats |
def get_data(self, data_format):
if data_format not in self.output_formats:
raise Exception("Output format {0} not available with this class. Available formats are {1}.".format(data_format, self.output_formats))
data_converter = getattr(self, "to_" + data_format)
return data_converter() | Reads the common format and converts to output data
data_format - the format of the output data. See utils.input.dataformats |
def from_csv(self, input_data):
reformatted_data = []
for (i,row) in enumerate(input_data):
if i==0:
headers = row
else:
data_row = {}
for (j,h) in enumerate(headers):
data_row.update({h : row[j]})
reformatted_data.append(data_row)
return reformatted_data | Reads csv format input data and converts to json. |
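To make the expected shape concrete, a small made-up example of the conversion performed by from_csv:

rows = [
    ['name', 'score'],   # header row supplies the keys
    ['alice', '10'],
    ['bob', '12'],
]
# from_csv(rows) would return:
# [{'name': 'alice', 'score': '10'}, {'name': 'bob', 'score': '12'}]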
def to_dataframe(self):
keys = self.data[0].keys()
column_list =[]
for k in keys:
key_list = []
for i in xrange(0,len(self.data)):
key_list.append(self.data[i][k])
column_list.append(key_list)
df = DataFrame(np.asarray(column_list).transpose(), columns=keys)
for i in xrange(0,df.shape[1]):
if is_number(df.iloc[:,i]):
df.iloc[:,i] = df.iloc[:,i].astype(float)
return df | Reads the common format self.data and writes out to a dataframe. |
def check_extensions(extensions: Set[str], allow_multifile: bool = False):
check_var(extensions, var_types=set, var_name='extensions')
# -- check them one by one
for ext in extensions:
check_extension(ext, allow_multifile=allow_multifile) | Utility method to check that all extensions in the provided set are valid
:param extensions:
:param allow_multifile:
:return: |
def check_extension(extension: str, allow_multifile: bool = False):
check_var(extension, var_types=str, var_name='extension')
# Extension should either be 'multifile' or start with EXT_SEPARATOR and contain only one EXT_SEPARATOR
if (extension.startswith(EXT_SEPARATOR) and extension.count(EXT_SEPARATOR) == 1) \
or (allow_multifile and extension is MULTIFILE_EXT):
# ok
pass
else:
raise ValueError('\'extension\' should start with \'' + EXT_SEPARATOR + '\' and contain no other '
'occurrence of \'' + EXT_SEPARATOR + '\'' + (', or be equal to \'' + MULTIFILE_EXT + '\' (for '
'multifile object parsing)' if allow_multifile else '')) | Utility method to check that the provided extension is valid. Extension should either be MULTIFILE_EXT
(='multifile') or start with EXT_SEPARATOR (='.') and contain only one occurrence of EXT_SEPARATOR
:param extension:
:param allow_multifile:
:return: |
def get_parsing_plan_log_str(obj_on_fs_to_parse, desired_type, log_only_last: bool, parser):
loc = obj_on_fs_to_parse.get_pretty_location(blank_parent_part=(log_only_last
and not GLOBAL_CONFIG.full_paths_in_logs),
compact_file_ext=True)
return '{loc} -> {type} ------- using {parser}'.format(loc=loc, type=get_pretty_type_str(desired_type),
parser=str(parser)) | Utility method used by several classes to log a message indicating that a given file object is planned to be parsed
to the given object type with the given parser. It is in particular used in str(ParsingPlan), but not only.
:param obj_on_fs_to_parse:
:param desired_type:
:param log_only_last: a flag to only log the last part of the file path (default False). Note that this can be
overridden by a global configuration 'full_paths_in_logs'
:param parser:
:return: |
def are_worth_chaining(parser, to_type: Type[S], converter: Converter[S, T]) -> bool:
if not parser.can_chain:
# The base parser prevents chaining
return False
elif not is_any_type(to_type) and is_any_type(converter.to_type):
# we gain the capability to generate any type. So it is interesting.
return True
elif issubclass(to_type, converter.to_type):
# Not interesting : the outcome of the chain would be not better than one of the parser alone
return False
# Note: we don't say that chaining a generic parser with a converter is useless. Indeed it might unlock some
# capabilities for the user (new file extensions, etc.) that would not be available with the generic parser
# targeting to_type alone. For example parsing object A from its constructor then converting A to B might
# sometimes be interesting, rather than parsing B from its constructor
else:
# Interesting
return True | Utility method to check if it makes sense to chain this parser with the given destination type, and the given
converter to create a parsing chain. Returns True if it brings value to chain them.
To bring value,
* the converter's output should not be a parent class of the parser's output. Otherwise
the chain does not even make any progress :)
* The parser has to allow chaining (with parser.can_chain=True)
:param parser:
:param to_type:
:param converter:
:return: |
def create_for_caught_error(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T],
obj: PersistedObject, caught: Exception, options: Dict[str, Dict[str, Any]]):
try:
typ = get_pretty_type_str(desired_type)
except:
typ = str(desired_type)
e = ParsingException('Error while parsing ' + str(obj) + ' as a ' + typ + ' with parser \''
+ str(parser) + '\' using options=(' + str(options) + ') : caught \n '
+ str(caught.__class__.__name__) + ' : ' + str(caught))\
.with_traceback(caught.__traceback__) # 'from e' was hiding the inner traceback. This is much better for debug
e.__cause__ = None
# e.__cause__ = caught
# store the exception still, to be able to handle it later
e.caught = caught
return e | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param caught:
:param options:
:return: |
def create_for_wrong_result_type(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T],
obj: PersistedObject, result: T, options: Dict[str, Dict[str, Any]]):
msg = "Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser returned an object " \
"of wrong type {tret}: {ret}".format(obj=obj, typ=get_pretty_type_str(desired_type), p=parser,
opts=options, tret=type(result), ret=result)
return WrongTypeCreatedError(msg) | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param result:
:param options:
:return: |
def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
try:
res = self._execute(logger, options)
except Exception as e:
raise ParsingException.create_for_caught_error(self.parser, self.obj_type, self.obj_on_fs_to_parse, e,
options)
# Check that the returned parsed object has the correct type
if res is not None:
if robust_isinstance(res, self.obj_type):
return res
# wrong type : error
raise WrongTypeCreatedError.create_for_wrong_result_type(self.parser, self.obj_type, self.obj_on_fs_to_parse,
res, options) | Called to parse the object as described in this parsing plan, using the provided arguments for the parser.
* Exceptions are caught and wrapped into ParsingException
* If result does not match expected type, an error is thrown
:param logger: the logger to use during parsing (optional: None is supported)
:param options: a dictionary of option sets. Each option set is identified with an id in the dictionary.
:return: |
def _execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
pass | Implementing classes should perform the parsing here, possibly using custom methods of self.parser.
:param logger:
:param options:
:return: |
def _get_applicable_options(self, options: Dict[str, Dict[str, Any]]):
return get_options_for_id(options, self.get_id_for_options()) | Returns the options that are applicable to this particular parser, from the full map of options.
It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of
the options corresponding to this id, or returns an empty dict().
:param options: a dictionary parser_id > options
:return: |
def create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger,
options: Dict[str, Dict[str, Any]]) -> ParsingPlan[T]:
pass | Creates a parsing plan to parse the given filesystem object into the given desired_type.
Implementing classes may wish to support additional parameters.
:param desired_type: the type of object that should be created as the output of parsing plan execution.
:param filesystem_object: the persisted object that should be parsed
:param logger: an optional logger to log all parsing plan creation and execution information
:param options: a dictionary of additional implementation-specific parameters (one dict per parser id).
Implementing classes may use 'self._get_applicable_options()' to get the options that are of interest for this
parser.
:return: |
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed):
return self.send.host_add(f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer,
f_asset_group, f_confirmed) | Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:return: (True/False, t_hosts.id or response message) |
def parse_now_field(s):
if not s.startswith('UTC:'):
return None # Invalid string
s = s[4:]
# isoformat can return strings both with and without microseconds - we
# account for both
try:
dt = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
dt = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
return dt | Return a datetime instance from a string generated by now_field.
IMPORTANT: the datetime will be in UTC |
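A hedged usage example, assuming parse_now_field is in scope (the timestamps below are made up):

import datetime

# The 'UTC:' prefix is required; the ISO timestamp may or may not carry microseconds.
assert parse_now_field('UTC:2020-01-02T03:04:05.000006') == datetime.datetime(2020, 1, 2, 3, 4, 5, 6)
assert parse_now_field('UTC:2020-01-02T03:04:05') == datetime.datetime(2020, 1, 2, 3, 4, 5)
assert parse_now_field('2020-01-02T03:04:05') is None  # missing prefix -> invalid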
def get_ftp(ftp_conf, debug=0):
server = ftp_conf.get('server')
user = ftp_conf.get('user')
password = ftp_conf.get('password')
start_path = ftp_conf.get('start_path')
slog.info("Connecting FTP server %s ......", server)
ftpStr = 'ftp://%s/'%server
if start_path:
ftpStr = ftpStr+start_path
ftp = ftplib.FTP(server, user, password)
ftp.set_debuglevel(debug)
if start_path:
ftp.cwd(start_path)
serverFiles = ftp.nlst()
slog.info('There are some files in %s:\n[%s]'%(ftpStr, ', '.join(serverFiles)))
return ftp, ftpStr | Returns an already-connected FTP instance and an FTP URL string.
:param dict ftp_conf: FTP configuration dict, in the following format:
>>> {
>>> 'server':'127.0.0.1',
>>> 'start_path':None,
>>> 'user':'admin',
>>> 'password':'123456',
>>> }
:returns: ftp, ftpserverstr
:rtype: :class:`ftplib.FTP` , str |
def upload_file(file_path, remote_path, ftp_conf, remove_file=False):
check_ftp_conf(ftp_conf)
ftp, ftpStr = get_ftp(ftp_conf)
lf = open(file_path, 'rb')
slog.info('Uploading "%s" to "%s/%s" ......'%(file_path, ftpStr, remote_path))
ftp.storbinary("STOR %s"%remote_path, lf)
filelist = ftp.nlst()
ftp.quit()
lf.close()
if remove_file:
os.remove(file_path)
slog.info('Upload done.')
return filelist | Uploads the specified file to the FTP server.
:param str file_path: Absolute path of the file to upload.
:param str remote_path: Path of the file on the FTP server, relative to the server's start path.
:param dict ftp_conf: FTP configuration dict; see :func:`get_ftp`.
:param bool remove_file: Whether to remove the local file after a successful upload.
:returns: The list of files on the FTP server
:rtype: list |
def retrieve_data(self):
#==== Retrieve data ====#
df = self.manager.get_historic_data(self.start.date(), self.end.date())
df.replace(0, np.nan, inplace=True)
return df | Retrives data as a DataFrame. |
def get_min_risk(self, weights, cov_matrix):
def func(weights):
"""The objective function that minimizes variance."""
return np.matmul(np.matmul(weights.transpose(), cov_matrix), weights)
def func_deriv(weights):
"""The derivative of the objective function."""
return (
np.matmul(weights.transpose(), cov_matrix.transpose()) +
np.matmul(weights.transpose(), cov_matrix)
)
constraints = ({'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)})
solution = self.solve_minimize(func, weights, constraints, func_deriv=func_deriv)
# NOTE: `min_risk` is unused, but may be helpful later.
# min_risk = solution.fun
allocation = solution.x
return allocation | Minimizes the variance of a portfolio. |
def get_max_return(self, weights, returns):
def func(weights):
"""The objective function that maximizes returns."""
return np.dot(weights, returns.values) * -1
constraints = ({'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)})
solution = self.solve_minimize(func, weights, constraints)
max_return = solution.fun * -1
# NOTE: `max_risk` is not used anywhere, but may be helpful in the future.
# allocation = solution.x
# max_risk = np.matmul(
# np.matmul(allocation.transpose(), cov_matrix), allocation
# )
return max_return | Maximizes the returns of a portfolio. |
def efficient_frontier(
self,
returns,
cov_matrix,
min_return,
max_return,
count
):
columns = [coin for coin in self.SUPPORTED_COINS]
# columns.append('Return')
# columns.append('Risk')
values = pd.DataFrame(columns=columns)
weights = [1/len(self.SUPPORTED_COINS)] * len(self.SUPPORTED_COINS)
def func(weights):
"""The objective function that minimizes variance."""
return np.matmul(np.matmul(weights.transpose(), cov_matrix), weights)
def func_deriv(weights):
"""The derivative of the objective function."""
return (
np.matmul(weights.transpose(), cov_matrix.transpose()) +
np.matmul(weights.transpose(), cov_matrix)
)
for point in np.linspace(min_return, max_return, count):
constraints = (
{'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)},
{'type': 'ineq', 'fun': lambda weights, i=point: (
np.dot(weights, returns.values) - i
)}
)
solution = self.solve_minimize(func, weights, constraints, func_deriv=func_deriv)
columns = {}
for index, coin in enumerate(self.SUPPORTED_COINS):
columns[coin] = math.floor(solution.x[index] * 100 * 100) / 100
# NOTE: These lines could be helpful, but are commented out right now.
# columns['Return'] = round(np.dot(solution.x, returns), 6)
# columns['Risk'] = round(solution.fun, 6)
values = values.append(columns, ignore_index=True)
return values | Returns a DataFrame of efficient portfolio allocations for `count` risk
indices. |
def solve_minimize(
self,
func,
weights,
constraints,
lower_bound=0.0,
upper_bound=1.0,
func_deriv=False
):
bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS)
return minimize(
fun=func, x0=weights, jac=func_deriv, bounds=bounds,
constraints=constraints, method='SLSQP', options={'disp': False}
) | Returns the solution to a minimization problem. |
def allocate(self):
df = self.manager.get_historic_data()[self.SUPPORTED_COINS]
#==== Calculate the daily changes ====#
change_columns = []
for column in df:
if column in self.SUPPORTED_COINS:
change_column = '{}_change'.format(column)
values = pd.Series(
(df[column].shift(-1) - df[column]) /
-df[column].shift(-1)
).values
df[change_column] = values
change_columns.append(change_column)
# print(df.head())
# print(df.tail())
#==== Variances and returns ====#
columns = change_columns
# NOTE: `risks` is not used, but may be used in the future
risks = df[columns].apply(np.nanvar, axis=0)
# print('\nVariance:\n{}\n'.format(risks))
returns = df[columns].apply(np.nanmean, axis=0)
# print('\nExpected returns:\n{}\n'.format(returns))
#==== Calculate risk and expected return ====#
cov_matrix = df[columns].cov()
# NOTE: The diagonal variances weren't calculated correctly, so here is a fix.
cov_matrix.values[[np.arange(len(self.SUPPORTED_COINS))] * 2] = df[columns].apply(np.nanvar, axis=0)
weights = np.array([1/len(self.SUPPORTED_COINS)] * len(self.SUPPORTED_COINS)).reshape(len(self.SUPPORTED_COINS), 1)
#==== Calculate portfolio with the minimum risk ====#
min_risk = self.get_min_risk(weights, cov_matrix)
min_return = np.dot(min_risk, returns.values)
#==== Calculate portfolio with the maximum return ====#
max_return = self.get_max_return(weights, returns)
#==== Calculate efficient frontier ====#
frontier = self.efficient_frontier(
returns, cov_matrix, min_return, max_return, 6
)
return frontier | Returns an efficient portfolio allocation for the given risk index. |
def handle_default_options(options):
if options.settings:
#Set the percept_settings_module (picked up by settings in conf.base)
os.environ['PERCEPT_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
#Append the pythonpath and the directory one up from the pythonpath to sys.path for importing
options.pythonpath = os.path.abspath(os.path.expanduser(options.pythonpath))
up_one_path = os.path.abspath(os.path.join(options.pythonpath, ".."))
sys.path.append(options.pythonpath)
sys.path.append(up_one_path)
return options | Pass in a Values instance from OptionParser. Handle settings and pythonpath
options - Values from OptionParser |
def create_parser(self, prog_name, subcommand):
parser = OptionParser(prog=prog_name,
usage=self.usage(subcommand),
option_list=self.option_list)
return parser | Create an OptionParser
prog_name - Name of a command
subcommand - Name of a subcommand |
def hook(name=None, *args, **kwargs):
def decorator(f):
if not hasattr(f, "hooks"):
f.hooks = []
f.hooks.append((name or f.__name__, args, kwargs))
return f
return decorator | Decorator to register the function as a hook |
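A small usage sketch of what the decorator records, assuming hook is in scope (the event name is made up):

@hook('on_save', 'arg1', flag=True)
def handler():
    pass

# The function stays callable and carries the registered hook metadata:
assert handler.hooks == [('on_save', ('arg1',), {'flag': True})]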
def register_hooks(func, hooks, obj):
for name, args, kwargs in hooks:
hook = getattr(obj, name)
force_call = kwargs.pop("_force_call", False)
if force_call or len(args) > 0 or len(kwargs) > 0:
hook = hook(*args, **kwargs)
hook(func) | Register func on obj via hooks.
Hooks should be a tuple of (name, args, kwargs) where
name is a method name of obj. If args or kwargs are not empty,
the method will be called first and expect a new function as return. |
def action(*args, **kwargs):
def decorator(f):
return ActionFunction(f, *args, **kwargs)
return decorator | Transforms functions or class methods into actions.
Optionally, you can define a function to be used as the view initializer:
@action()
def my_action():
pass
@my_action.init_view
def my_action_init_view(view, options):
pass |
def with_actions(actions_or_group_name, actions=None):
group = None
if isinstance(actions_or_group_name, str):
group = actions_or_group_name
else:
actions = actions_or_group_name
def decorator(f):
if isinstance(f, WithActionsDecorator):
dec = f
else:
dec = WithActionsDecorator(f)
dec.actions.extend(load_actions(actions, group=group))
return dec
return decorator | Executes the list of actions before/after the function
Actions should be a list where items are action names as
strings or a dict. See frasco.actions.loaders.load_action(). |
def expose(rule, **options):
def decorator(f):
if not hasattr(f, "urls"):
f.urls = []
if isinstance(rule, (list, tuple)):
f.urls.extend(rule)
else:
f.urls.append((rule, options))
return f
return decorator | Decorator to add an url rule to a function |
def _create_unicode_map():
unicode_map = {}
for beta, uni in _map.BETACODE_MAP.items():
# Include decomposed equivalent where necessary.
norm = unicodedata.normalize('NFC', uni)
unicode_map[norm] = beta
unicode_map[uni] = beta
# Add the final sigmas.
final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA)
unicode_map[final_sigma_norm] = 's'
unicode_map[_FINAL_LC_SIGMA] = 's'
return unicode_map | Create the inverse map from unicode to betacode.
Returns:
The hash map to convert unicode characters to the beta code representation. |
def _create_conversion_trie(strict):
t = pygtrie.CharTrie()
for beta, uni in _map.BETACODE_MAP.items():
if strict:
t[beta] = uni
else:
# The order of accents is very strict and weak. Allow for many orders of
# accents between asterisk and letter or after letter. This does not
# introduce ambiguity since each betacode token only has one letter and
either starts with an asterisk or a letter.
diacritics = beta[1:]
perms = itertools.permutations(diacritics)
for perm in perms:
perm_str = beta[0] + ''.join(perm)
t[perm_str.lower()] = uni
t[perm_str.upper()] = uni
return t | Create the trie for betacode conversion.
Args:
strict: Flag to allow for flexible diacritic order on input.
Returns:
The trie for conversion. |
def _find_max_beta_token_len():
max_beta_len = -1
for beta, uni in _map.BETACODE_MAP.items():
if len(beta) > max_beta_len:
max_beta_len = len(beta)
return max_beta_len | Finds the maximum length of a single betacode token.
Returns:
The length of the longest key in the betacode map, which corresponds to the
longest single betacode token. |
def beta_to_uni(text, strict=False):
# Check if the requested configuration for conversion already has a trie
# stored otherwise convert it.
param_key = (strict,)
try:
t = _BETA_CONVERSION_TRIES[param_key]
except KeyError:
t = _create_conversion_trie(*param_key)
_BETA_CONVERSION_TRIES[param_key] = t
transform = []
idx = 0
possible_word_boundary = False
while idx < len(text):
if possible_word_boundary and _penultimate_sigma_word_final(transform):
transform[-2] = _FINAL_LC_SIGMA
step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])
if step:
possible_word_boundary = text[idx] in _BETA_PUNCTUATION
key, value = step
transform.append(value)
idx += len(key)
else:
possible_word_boundary = True
transform.append(text[idx])
idx += 1
# Check one last time in case there is some whitespace or punctuation at the
# end and check if the last character is a sigma.
if possible_word_boundary and _penultimate_sigma_word_final(transform):
transform[-2] = _FINAL_LC_SIGMA
elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
transform[-1] = _FINAL_LC_SIGMA
converted = ''.join(transform)
return converted | Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text. |
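A hedged usage example, assuming both conversion functions are imported; the exact output depends on the betacode map, but a typical conversion looks like this:

greek = beta_to_uni('lo/gos')   # expected to yield the polytonic Greek form with a final sigma
back = uni_to_beta(greek)       # expected to round-trip back to 'lo/gos'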
def uni_to_beta(text):
u = _UNICODE_MAP
transform = []
for ch in text:
try:
conv = u[ch]
except KeyError:
conv = ch
transform.append(conv)
converted = ''.join(transform)
return converted | Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot convert to beta and then back to unicode.
Returns:
The betacode equivalent of the inputted text where applicable. |
def __calculate_order(self, node_dict):
if len(node_dict.keys()) != len(set(node_dict.keys())):
raise DependencyTreeException("Duplicate Keys Exist in node dictionary!")
valid_order = [node for node, dependencies in node_dict.items() if len(dependencies) == 0]
remaining_nodes = [node for node in node_dict.keys() if node not in valid_order]
while len(remaining_nodes) > 0:
node_added = False
for node in remaining_nodes:
dependencies = [d for d in node_dict[node] if d not in valid_order]
if len(dependencies) == 0:
valid_order.append(node)
remaining_nodes.remove(node)
node_added = True
if not node_added:
# the tree must be invalid, as it was not possible to remove a node.
# it's hard to find all the errors, so just spit out the first one you can find.
invalid_node = remaining_nodes[0]
invalid_dependency = ', '.join(node_dict[invalid_node])
if invalid_dependency not in remaining_nodes:
raise DependencyTreeException(
"Missing dependency! One or more of ({dependency}) are missing for {dependant}.".format(
dependant=invalid_node, dependency=invalid_dependency))
else:
raise DependencyTreeException("The dependency %s is cyclic or dependent on a cyclic dependency" % invalid_dependency)
return valid_order | Determine a valid ordering of the nodes in which a node is not called before all of its dependencies.
Raise an error if there is a cycle, or nodes are missing. |
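To illustrate the expected input and output, a made-up dependency dictionary and one valid ordering it could produce:

node_dict = {
    'db': [],
    'cache': [],
    'api': ['db', 'cache'],
    'ui': ['api'],
}
# __calculate_order(node_dict) should place every node after its dependencies,
# e.g. ['db', 'cache', 'api', 'ui'].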
def read_input(self, filename, has_header=True):
stream = open(filename)
reader = csv.reader(stream)
csv_data = []
for (i, row) in enumerate(reader):
if i==0:
if not has_header:
csv_data.append([str(i) for i in xrange(0,len(row))])
csv_data.append(row)
self.data = csv_data | filename is any filename, or something on which open() can be called
for example:
csv_input = CSVInput()
csv_input.read_input("csvfile.csv") |
def pprint_out(dct: Dict):
for name, val in dct.items():
print(name + ':')
pprint(val, indent=4) | Utility method to pretty-print a dictionary that is typically output by parsyfiles (an ordered dict)
:param dct:
:return: |
def warn_import_error(type_of_obj_support: str, caught: ImportError):
msg = StringIO()
msg.writelines('Import Error while trying to add support for ' + type_of_obj_support + '. You may continue but '
'the associated parsers and converters will not be available : \n')
traceback.print_tb(caught.__traceback__, file=msg)
msg.writelines(str(caught.__class__.__name__) + ' : ' + str(caught) + '\n')
warn(msg.getvalue()) | Utility method to print a warning message about failed import of some modules
:param type_of_obj_support:
:param caught:
:return: |
def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]:
return {MultifileCollectionParser.__name__: {'lazy_parsing': lazy_mfcollection_parsing}} | Utility method to create a default options structure with the lazy parsing inside
:param lazy_mfcollection_parsing:
:return: the options structure filled with lazyparsing option (for the MultifileCollectionParser) |
def add_parser_options(options: Dict[str, Dict[str, Any]], parser_id: str, parser_options: Dict[str, Dict[str, Any]],
overwrite: bool = False):
if parser_id in options.keys() and not overwrite:
raise ValueError('There are already options in this dictionary for parser id ' + parser_id)
options[parser_id] = parser_options
return options | Utility method to add options for a given parser, to the provided options structure
:param options:
:param parser_id:
:param parser_options:
:param overwrite: True to silently overwrite. Otherwise an error will be thrown
:return: |
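A hedged usage sketch combining the two helpers above (the parser id and option name are made up):

opts = create_parser_options(lazy_mfcollection_parsing=True)
opts = add_parser_options(opts, 'my_parser_id', {'some_option': 1})
# opts now maps the MultifileCollectionParser name to {'lazy_parsing': True}
# and 'my_parser_id' to {'some_option': 1}.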
def parse_item(location: str, item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None,
logger: Logger = default_logger, lazy_mfcollection_parsing: bool = False) -> T:
rp = _create_parser_from_default(logger)
opts = create_parser_options(lazy_mfcollection_parsing=lazy_mfcollection_parsing)
return rp.parse_item(location, item_type, item_name_for_log=item_name_for_log, file_mapping_conf=file_mapping_conf,
options=opts) | Creates a RootParser() and calls its parse_item() method
:param location:
:param item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param logger:
:param lazy_mfcollection_parsing:
:return: |
def parse_collection(location: str, base_item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None, logger: Logger = default_logger,
lazy_mfcollection_parsing: bool = False)\
-> Dict[str, T]:
rp = _create_parser_from_default(logger)
opts = create_parser_options(lazy_mfcollection_parsing=lazy_mfcollection_parsing)
return rp.parse_collection(location, base_item_type, item_name_for_log=item_name_for_log,
file_mapping_conf=file_mapping_conf, options=opts) | Utility method to create a RootParser() with default configuration and call its parse_collection() method
:param location:
:param base_item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param logger:
:param lazy_mfcollection_parsing:
:return: |
def install_basic_multifile_support(self):
if not self.multifile_installed:
self.register_parser(MultifileCollectionParser(self))
self.register_parser(MultifileObjectParser(self, self))
self.multifile_installed = True
else:
raise Exception('Multifile support has already been installed') | Utility method for users who created a RootParser with register_default_plugins=False, in order to register only
the multifile support
:return: |
def parse_collection(self, item_file_prefix: str, base_item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None,
options: Dict[str, Dict[str, Any]] = None) -> Dict[str, T]:
# -- item_name_for_log
item_name_for_log = item_name_for_log or ''
check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
# creating the wrapping dictionary type
collection_type = Dict[str, base_item_type]
if len(item_name_for_log) > 0:
item_name_for_log = item_name_for_log + ' '
self.logger.debug('**** Starting to parse ' + item_name_for_log + 'collection of <'
+ get_pretty_type_str(base_item_type) + '> at location ' + item_file_prefix +' ****')
# common steps
return self._parse__item(collection_type, item_file_prefix, file_mapping_conf, options=options) | Main method to parse a collection of items of type 'base_item_type'.
:param item_file_prefix:
:param base_item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param options:
:return: |
def parse_item(self, location: str, item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> T:
# -- item_name_for_log
item_name_for_log = item_name_for_log or ''
check_var(item_name_for_log, var_types=str, var_name='item_name_for_log')
if len(item_name_for_log) > 0:
item_name_for_log = item_name_for_log + ' '
self.logger.debug('**** Starting to parse single object ' + item_name_for_log + 'of type <'
+ get_pretty_type_str(item_type) + '> at location ' + location + ' ****')
# common steps
return self._parse__item(item_type, location, file_mapping_conf, options=options) | Main method to parse an item of type item_type
:param location:
:param item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param options:
:return: |
def _parse__item(self, item_type: Type[T], item_file_prefix: str,
file_mapping_conf: FileMappingConfiguration = None,
options: Dict[str, Dict[str, Any]] = None) -> T:
# for consistency : if options is None, default to the default values of create_parser_options
options = options or create_parser_options()
# creating the persisted object (this performs required checks)
file_mapping_conf = file_mapping_conf or WrappedFileMappingConfiguration()
obj = file_mapping_conf.create_persisted_object(item_file_prefix, logger=self.logger)
# print('')
self.logger.debug('')
# create the parsing plan
pp = self.create_parsing_plan(item_type, obj, logger=self.logger)
# print('')
self.logger.debug('')
# parse
res = pp.execute(logger=self.logger, options=options)
# print('')
self.logger.debug('')
return res | Common parsing steps to parse an item
:param item_type:
:param item_file_prefix:
:param file_mapping_conf:
:param options:
:return: |
def findSubCommand(args):
# If the only command we find is the first element of args, we've found the
# driver script itself and re-executing it will cause an infinite loop, so
# don't even look at the first element on its own.
for n in range(len(args) - 1):
command = '-'.join(args[:(len(args) - n)])
commandArgs = args[len(args) - n:]
if isProgram(command):
return (command, commandArgs)
raise StandardError("Could not find a %s subcommand executable" % command) | Given a list ['foo','bar', 'baz'], attempts to create a command name in the
format 'foo-bar-baz'. If that command exists, we run it. If it doesn't, we
check to see if foo-bar exists, in which case we run `foo-bar baz`. We keep
taking chunks off the end of the command name and adding them to the argument
list until we find a valid command name we can run.
This allows us to easily make git-style command drivers where for example we
have a driver script, foo, and subcommand scripts foo-bar and foo-baz, and when
the user types `foo bar foobar` we find the foo-bar script and run it as
`foo-bar foobar`
:param list|tuple args: list to try and convert to a command args pair
:returns: command and arguments list
:rtype: tuple
:raises StandardError: if the args can't be matched to an executable subcommand |
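A small sketch that mirrors the probing loop, to make the resolution order concrete (the candidate_commands helper and the command names are illustrative):

def candidate_commands(args):
    # Yields (command, remaining_args) pairs in the order findSubCommand probes them.
    for n in range(len(args) - 1):
        yield '-'.join(args[:len(args) - n]), args[len(args) - n:]

# 'foo-bar-baz' is tried first, then 'foo-bar baz'; 'foo' alone is never probed,
# so the driver script cannot re-execute itself.
assert list(candidate_commands(['foo', 'bar', 'baz'])) == [('foo-bar-baz', []), ('foo-bar', ['baz'])]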
def SpamsumDistance(ssA, ssB):
'''
returns the spamsum distance between ssA and ssB
if they use a different block size, assume maximum distance
otherwise returns the LevDistance
'''
mA = re.match('^(\d+)[:](.*)$', ssA)
mB = re.match('^(\d+)[:](.*)$', ssB)
if mA is None or mB is None:
raise ValueError("do not appear to be spamsum signatures")
if mA.group(1) != mB.group(1):
return max([len(mA.group(2)), len(mB.group(2))])
else:
return LevDistance(mA.group(2), mB.group(2)) | returns the spamsum distance between ssA and ssB
if they use a different block size, assume maximum distance
otherwise returns the LevDistance |
def terms(cls, tags, minimum_match=None):
'''
A query that matches on any (configurable) of the provided terms. This is a simpler syntax query for using a bool query with several term queries in the should clauses. For example:
{
"terms" : {
"tags" : [ "blue", "pill" ],
"minimum_match" : 1
}
}'''
instance = cls(terms={'tags': tags})
if minimum_match is not None:
instance['terms']['minimum_match'] = minimum_match
return instance | A query that matches on any (configurable) of the provided terms. This is a simpler syntax query for using a bool query with several term queries in the should clauses. For example:
{
"terms" : {
"tags" : [ "blue", "pill" ],
"minimum_match" : 1
}
} |
def match(cls, field, query, operator=None):
'''
A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field, you can substitute the name of any field (including _all) instead.
'''
instance = cls(match={field: {'query': query}})
if operator is not None:
instance['match'][field]['operator'] = operator
return instance | A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field, you can substitute the name of any field (including _all) instead. |
def fuzzy(cls, field, value, boost=None, min_similarity=None, prefix_length=None):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/fuzzy-query.html
A fuzzy based query that uses similarity based on Levenshtein (edit distance) algorithm.
'''
instance = cls(fuzzy={field: {'value': value}})
if boost is not None:
instance['fuzzy'][field]['boost'] = boost
if min_similarity is not None:
instance['fuzzy'][field]['min_similarity'] = min_similarity
if prefix_length is not None:
instance['fuzzy'][field]['prefix_length'] = prefix_length
return instance | http://www.elasticsearch.org/guide/reference/query-dsl/fuzzy-query.html
A fuzzy based query that uses similarity based on Levenshtein (edit distance) algorithm. |
def has_child(cls, child_type, query):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something')
> query = ElasticQuery().has_child('blog_tag', child_query)
'''
instance = cls(has_child={'type': child_type, 'query': query})
return instance | http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something')
> query = ElasticQuery().has_child('blog_tag', child_query) |
def queryByPortSensor(portiaConfig, edgeId, port, sensor, last=False, params={ 'from': None, 'to': None, 'order': None, 'precision': 'ms', 'limit': None }):
header = {'Accept': 'text/csv'}
if last == False:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) )
else:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}/last{3}'.format( edgeId, port, sensor, utils.buildGetParams(params) )
response = utils.httpGetRequest(portiaConfig, endpoint, header)
if response.status_code == 200:
try:
dimensionSeries = pandas.read_csv( StringIO(response.text), sep=';' )
if portiaConfig['debug']:
print( '[portia-debug]: {0} rows'.format( len(dimensionSeries.index) ) )
return dimensionSeries
except:
raise Exception('couldn\'t create pandas data frame')
else:
raise Exception('couldn\'t retrieve data') | Returns a pandas data frame with the portia select resultset |
def try_parse_num_and_booleans(num_str):
if isinstance(num_str, str):
# bool
if num_str.lower() == 'true':
return True
elif num_str.lower() == 'false':
return False
# int
if num_str.isdigit():
return int(num_str)
# float
try:
return float(num_str)
except ValueError:
# give up
return num_str
else:
# dont try
return num_str | Tries to parse the provided string as a number or boolean
:param num_str:
:return: |
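A quick usage example of the fallback order (bool, then int, then float, then the original value), assuming the function is in scope:

assert try_parse_num_and_booleans('True') is True
assert try_parse_num_and_booleans('42') == 42
assert try_parse_num_and_booleans('3.14') == 3.14
assert try_parse_num_and_booleans('not-a-number') == 'not-a-number'
assert try_parse_num_and_booleans(7) == 7  # non-strings are returned untouched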
def read_dict_from_properties(desired_type: Type[dict], file_object: TextIOBase,
logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]:
# right now jprops relies on a byte stream. So we convert back our nicely decoded Text stream to a unicode
# byte stream ! (urgh)
class Unicoder:
def __init__(self, file_object):
self.f = file_object
def __iter__(self):
return self
def __next__(self):
line = self.f.__next__()
return line.encode(encoding='utf-8')
res = jprops.load_properties(Unicoder(file_object))
# first automatic conversion of strings > numbers
res = {key: try_parse_num_and_booleans(val) for key, val in res.items()}
# further convert if required
return ConversionFinder.convert_collection_values_according_to_pep(res, desired_type, conversion_finder, logger,
**kwargs) | Helper method to read a dictionary from a .properties file (java-style) using jprops.
Since jprops does not provide automatic handling for boolean and numbers, this tries to add the feature.
:param file_object:
:return: |
def get_default_jprops_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]:
return [SingleFileParserFunction(parser_function=read_dict_from_properties,
streaming_mode=True, custom_name='read_dict_from_properties',
supported_exts={'.properties', '.txt'},
supported_types={dict},
function_args={'conversion_finder': conversion_finder}),
# SingleFileParserFunction(parser_function=read_list_from_properties,
# streaming_mode=True,
# supported_exts={'.properties', '.txt'},
# supported_types={list}),
] | Utility method to return the default parsers able to parse a dictionary from a properties file.
:return: |
def hook(event=None, dependencies=None):
def wrapper(func):
"""I'm a simple wrapper that manages event hooking"""
func.__deps__ = dependencies
EVENTS.hook(func, event, dependencies)
return func
return wrapper | Hooking decorator. Just `@hook(event, dependencies)` on your function
Kwargs:
event (str): String or Iterable with events to hook
dependencies (str): String or Iterable with modules whose hooks have
to be called before this one for **this** event
Wraps :func:`EventList.hook` |
def load(path):
importpath = path.replace("/", ".").replace("\\", ".")
if importpath[-3:] == ".py":
importpath = importpath[:-3]
try:
importlib.import_module(importpath)
except (ModuleNotFoundError, TypeError):
exec(open(path).read()) | Helper function that tries to load a filepath (or python module notation)
as a python module and on failure `exec` it.
Args:
path (str): Path or module to load
The function tries to import `example.module` when either `example.module`,
`example/module` or `example/module.py` is given. |
def add_image(self, image_path, annotations):
self.image_paths.append(image_path)
self.bounding_boxes.append([bounding_box_from_annotation(**a) for a in annotations]) | Adds an image and its bounding boxes to the current list of files
The bounding boxes are automatically estimated based on the given annotations.
**Parameters:**
``image_path`` : str
The file name of the image, including its full path
``annotations`` : [dict]
A list of annotations, i.e., where each annotation can be anything that :py:func:`bounding_box_from_annotation` can handle; this list can be empty, in case the image does not contain any faces |