code | docs |
---|---|
def command():
return (
OneOrMore(
Word(approved_printables+' ').setResultsName('command',
listAllMatches=True) ^
Grammar.__command_input_output.setResultsName('_in',
listAllMatches=True)
)
) | Grammar for commands found in the overall input files. |
def listen_to_event_updates():
def callback(event):
print('Event:', event)
client.create_event_subscription(instance='simulator', on_data=callback)
sleep(5) | Subscribe to events. |
def get_current_scene_node():
c = cmds.namespaceInfo(':', listOnlyDependencyNodes=True, absoluteName=True, dagPath=True)
l = cmds.ls(c, type='jb_sceneNode', absoluteName=True)
if not l:
return
else:
for n in sorted(l):
if not cmds.listConnections("%s.reftrack" % n, d=False):
return n | Return the name of the jb_sceneNode that describes the current scene, or None if there is no scene node.
:returns: the full name of the node or None, if there is no scene node
:rtype: str | None
:raises: None |
def updateSpec(self, *args, **kwargs):
if args[0] is None:
self.specPlot.clearImg()
elif isinstance(args[0], basestring):
self.specPlot.fromFile(*args, **kwargs)
else:
self.specPlot.updateData(*args, **kwargs) | Updates the spectrogram. The first argument can be a filename
or a data array. If the first argument is None, clears the spectrogram.
For other arguments, see: :meth:`SpecWidget.updateData<sparkle.gui.plotting.pyqtgraph_widgets.SpecWidget.updateData>` |
def showSpec(self, fname):
if not self.specPlot.hasImg() and fname is not None:
self.specPlot.fromFile(fname) | Draws the spectrogram from file if none is currently displayed |
def updateSpiketrace(self, xdata, ydata, plotname=None):
if plotname is None:
plotname = self.responsePlots.keys()[0]
if len(ydata.shape) == 1:
self.responsePlots[plotname].updateData(axeskey='response', x=xdata, y=ydata)
else:
self.responsePlots[plotname].addTraces(xdata, ydata) | Updates the spike trace
:param xdata: index values
:type xdata: numpy.ndarray
:param ydata: values to plot
:type ydata: numpy.ndarray |
def addRasterPoints(self, xdata, repnum, plotname=None):
if plotname is None:
plotname = self.responsePlots.keys()[0]
ydata = np.ones_like(xdata)*repnum
self.responsePlots[plotname].appendData('raster', xdata, ydata) | Add a list (or numpy array) of points to the raster plot,
in any order.
:param xdata: bin centers
:param repnum: repetition number to plot the points at |
def updateSignal(self, xdata, ydata, plotname=None):
if plotname is None:
plotname = self.responsePlots.keys()[0]
self.responsePlots[plotname].updateData(axeskey='stim', x=xdata, y=ydata) | Updates the trace of the outgoing signal
:param xdata: time points of recording
:param ydata: brain potential at time points |
def setXlimits(self, lims):
# update all "linked" plots
self.specPlot.setXlim(lims)
for plot in self.responsePlots.values():
plot.setXlim(lims)
# ridiculous...
sizes = self.splittersw.sizes()
if len(sizes) > 1:
if self.badbadbad:
sizes[0] +=1
sizes[1] -=1
else:
sizes[0] -=1
sizes[1] +=1
self.badbadbad = not self.badbadbad
self.splittersw.setSizes(sizes)
self._ignore_range_signal = False | Sets the X axis limits of the trace plot
:param lims: (min, max) of x axis, in same units as data
:type lims: (float, float) |
def setNreps(self, nreps):
for plot in self.responsePlots.values():
plot.setNreps(nreps) | Sets the number of reps before the raster plot resets |
def specAutoRange(self):
trace_range = self.responsePlots.values()[0].viewRange()[0]
vb = self.specPlot.getViewBox()
vb.autoRange(padding=0)
self.specPlot.setXlim(trace_range) | Auto adjusts the visible range of the spectrogram |
def interpret_header(self):
# handle special cases since date-obs field changed names
if 'DATE_OBS' in self.header:
self.date = self.header['DATE_OBS']
elif 'DATE-OBS' in self.header:
self.date = self.header['DATE-OBS']
else:
raise Exception("Image does not have a DATE_OBS or DATE-OBS field")
self.cy, self.cx = self.header['CRPIX1'], self.header['CRPIX2']
sun_radius_angular = sun.solar_semidiameter_angular_size(t=time.parse_time(self.date)).arcsec
arcsec_per_pixel = self.header['CDELT1']
self.sun_radius_pixel = (sun_radius_angular / arcsec_per_pixel) | Read pertinent information from the image headers,
especially location and radius of the Sun to calculate the default thematic map
:return: sets self.date, self.cy, self.cx, and self.sun_radius_pixel |
def save(self):
out = Outgest(self.output, self.selection_array.astype('uint8'), self.headers, self.config_path)
out.save()
out.upload() | Save as a FITS file and attempt an upload if designated in the configuration file |
def on_exit(self):
answer = messagebox.askyesnocancel("Exit", "Do you want to save as you quit the application?")
if answer:
self.save()
self.quit()
self.destroy()
elif answer is None:
pass # the cancel action
else:
self.quit()
self.destroy() | Called when the user clicks to exit; prompts whether to save before quitting |
def make_gui(self):
self.option_window = Toplevel()
self.option_window.protocol("WM_DELETE_WINDOW", self.on_exit)
self.canvas_frame = tk.Frame(self, height=500)
self.option_frame = tk.Frame(self.option_window, height=300)
self.canvas_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.option_frame.pack(side=tk.RIGHT, fill=None, expand=False)
self.make_options_frame()
self.make_canvas_frame()
self.disable_singlecolor() | Sets up the general structure of the GUI; the first function called |
def configure_threecolor_image(self):
order = {'red': 0, 'green': 1, 'blue': 2}
self.image = np.zeros((self.shape[0], self.shape[1], 3))
for color, var in self.multicolorvars.items():
channel = var.get() # determine which channel should be plotted as this color
self.image[:, :, order[color]] = self.data[channel]
# scale the image by the power
self.image[:, :, order[color]] = np.power(self.image[:, :, order[color]],
self.multicolorpower[color].get())
# adjust the percentile thresholds
lower = np.nanpercentile(self.image[:, :, order[color]], self.multicolormin[color].get())
upper = np.nanpercentile(self.image[:, :, order[color]], self.multicolormax[color].get())
self.image[np.where(self.image[:, :, order[color]] < lower)] = lower
self.image[np.where(self.image[:, :, order[color]] > upper)] = upper
# image values must be between (0,1) so scale image
for color, index in order.items():
self.image[:, :, index] /= np.nanmax(self.image[:, :, index]) | configures the three color image according to the requested parameters
:return: nothing, just updates self.image |
def configure_singlecolor_image(self, scale=False):
# determine which channel to use
self.image = self.data[self.singlecolorvar.get()]
# scale the image by requested power
self.image = np.power(self.image, self.singlecolorpower.get())
# adjust the percentile thresholds
lower = np.nanpercentile(self.image, self.singlecolormin.get())
upper = np.nanpercentile(self.image, self.singlecolormax.get())
self.image[self.image < lower] = lower
self.image[self.image > upper] = upper
# image values must be between (0,1) so scale image
self.image /= np.nanmax(self.image) | configures the single color image according to the requested parameters
:return: nothing, just updates self.image |
def updateArray(self, array, indices, value):
lin = np.arange(array.size)
new_array = array.flatten()
new_array[lin[indices]] = value
return new_array.reshape(array.shape) | updates array so that pixels at indices take on value
:param array: (m,n) array to adjust
:param indices: flattened image indices to change value
:param value: new value to assign
:return: the changed (m,n) array |
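A standalone sketch of the flattened-index update described above (plain NumPy, reproduced as a free function so the example runs on its own; array sizes and values are illustrative):

```python
import numpy as np

def update_array(array, indices, value):
    # Work on a flat copy so the caller's array is left untouched.
    new_array = array.flatten()
    new_array[np.asarray(indices)] = value
    return new_array.reshape(array.shape)

arr = np.zeros((3, 3), dtype=int)
# Flattened indices 0, 4 and 8 form the main diagonal of a 3x3 array.
print(update_array(arr, [0, 4, 8], 7))
```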
def onlasso(self, verts):
p = path.Path(verts)
ind = p.contains_points(self.pix, radius=1)
self.history.append(self.selection_array.copy())
self.selection_array = self.updateArray(self.selection_array,
ind,
self.solar_class_var.get())
self.mask.set_data(self.selection_array)
self.fig.canvas.draw_idle() | Main function to control the action of the lasso, allows user to draw on data image and adjust thematic map
:param verts: the vertices selected by the lasso
:return: nothing, but updates the selection array so the lassoed region now has the selected theme, and redraws the canvas |
def onpress(self, event):
if event.key == 'c': # clears all the contours
for patch in self.region_patches:
patch.remove()
self.region_patches = []
self.fig.canvas.draw_idle()
elif event.key == "u": # undo a label
self.undobutton_action() | Reacts to key commands
:param event: a keyboard event
:return: if 'c' is pressed, clears all region patches; if 'u' is pressed, undoes the last label |
def make_options_frame(self):
self.tab_frame = ttk.Notebook(self.option_frame, width=800)
self.tab_configure = tk.Frame(self.tab_frame)
self.tab_classify = tk.Frame(self.tab_frame)
self.make_configure_tab()
self.make_classify_tab()
self.tab_frame.add(self.tab_configure, text="Configure")
self.tab_frame.add(self.tab_classify, text="Classify")
self.tab_frame.pack(fill=tk.BOTH, expand=True) | make the frame that allows for configuration and classification |
def disable_multicolor(self):
# disable the multicolor image
for color in ['red', 'green', 'blue']:
self.multicolorscales[color].config(state=tk.DISABLED, bg='grey')
self.multicolorframes[color].config(bg='grey')
self.multicolorlabels[color].config(bg='grey')
self.multicolordropdowns[color].config(bg='grey', state=tk.DISABLED)
self.multicolorminscale[color].config(bg='grey', state=tk.DISABLED)
self.multicolormaxscale[color].config(bg='grey', state=tk.DISABLED)
# enable the single color
self.singlecolorscale.config(state=tk.NORMAL, bg=self.single_color_theme)
self.singlecolorframe.config(bg=self.single_color_theme)
self.singlecolorlabel.config(bg=self.single_color_theme)
self.singlecolordropdown.config(bg=self.single_color_theme, state=tk.NORMAL)
self.singlecolorminscale.config(bg=self.single_color_theme, state=tk.NORMAL)
self.singlecolormaxscale.config(bg=self.single_color_theme, state=tk.NORMAL) | swap from the multicolor image to the single color image |
def update_button_action(self):
if self.mode.get() == 3: # threecolor
self.configure_threecolor_image()
elif self.mode.get() == 1: # singlecolor
self.configure_singlecolor_image()
else:
raise ValueError("mode can only be singlecolor or threecolor")
self.imageplot.set_data(self.image)
if self.mode.get() == 1: # singlecolor
self.imageplot.set_cmap('gist_gray')
self.fig.canvas.draw_idle() | when update button is clicked, refresh the data preview |
def make_configure_tab(self):
# Setup the choice between single and multicolor
modeframe = tk.Frame(self.tab_configure)
self.mode = tk.IntVar()
singlecolor = tk.Radiobutton(modeframe, text="Single color", variable=self.mode,
value=1, command=lambda: self.disable_multicolor())
multicolor = tk.Radiobutton(modeframe, text="Three color", variable=self.mode,
value=3, command=lambda: self.disable_singlecolor())
self.mode.set(3)
singlecolor.pack(side=tk.LEFT)
multicolor.pack(side=tk.LEFT)
updatebutton = tk.Button(master=modeframe, text="Update",
command=self.update_button_action)
updatebutton.pack(side=tk.RIGHT)
modeframe.grid(row=0, column=0)
self.setup_multicolor()
self.setup_singlecolor() | initial set up of configure tab |
def make_classify_tab(self):
self.pick_frame = tk.Frame(self.tab_classify)
self.pick_frame2 = tk.Frame(self.tab_classify)
self.solar_class_var = tk.IntVar()
self.solar_class_var.set(0) # initialize to unlabeled
buttonnum = 0
frame = [self.pick_frame, self.pick_frame2]
for text, value in self.config.solar_classes:
b = tk.Radiobutton(frame[buttonnum % 2], text=text,
variable=self.solar_class_var,
value=value, background=self.config.solar_colors[text],
indicatoron=0, width=50, height=2, command=self.change_class)
b.pack(fill=tk.BOTH, expand=1)
buttonnum += 1
self.pick_frame.grid(row=0, column=0, rowspan=5, sticky=tk.W + tk.E + tk.N + tk.S)
self.pick_frame2.grid(row=0, column=1, rowspan=5, sticky=tk.W + tk.E + tk.N + tk.S)
undobutton = tk.Button(master=self.tab_classify, text="Undo",
command=self.undobutton_action)
undobutton.grid(row=6, column=0, columnspan=2, sticky=tk.W + tk.E) | initial set up of classification tab |
def undobutton_action(self):
if len(self.history) > 1:
old = self.history.pop(-1)
self.selection_array = old
self.mask.set_data(old)
self.fig.canvas.draw_idle() | when undo is clicked, revert the thematic map to the previous state |
def change_class(self):
self.toolbarcenterframe.config(text="Draw: {}".format(self.config.solar_class_name[self.solar_class_var.get()])) | On changing the classification label, update the "Draw" text |
def draw_circle(self, center, radius, array, value, mode="set"):
ri, ci = draw.circle(center[0], center[1],
radius=radius,
shape=array.shape)
if mode == "add":
array[ri, ci] += value
elif mode == "set":
array[ri, ci] = value
else:
raise ValueError("draw_circle mode must be 'set' or 'add' but {} used".format(mode))
return ri, ci, array[ri,ci] | Draws a circle of specified radius on the input array and fills it with specified value
:param center: a tuple for the center of the circle
:type center: tuple (x,y)
:param radius: how many pixels in radius the circle is
:type radius: int
:param array: image to draw circle on
:type array: size (m,n) numpy array
:param value: what value to fill the circle with
:type value: float
:param mode: if "set" will assign the circle interior value, if "add" will add the value to the circle interior,
throws exception otherwise
:type mode: string, either "set" or "add"
:return: the row indices, column indices, and updated values inside the circle; the input array is modified in place |
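A minimal usage sketch of the same set/add circle fill, written with `skimage.draw.disk` (the current name for the older `draw.circle` used above); the canvas size, center and values are illustrative:

```python
import numpy as np
from skimage import draw  # draw.disk supersedes the deprecated draw.circle

canvas = np.zeros((20, 20))
# Row/column indices of every pixel inside the circle, clipped to the canvas.
ri, ci = draw.disk((10, 10), 5, shape=canvas.shape)
canvas[ri, ci] = 3.0   # "set" mode: assign the value
canvas[ri, ci] += 1.0  # "add" mode: accumulate on top of the existing values
print(int((canvas == 4.0).sum()), "pixels inside the circle")
```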
def draw_annulus(self, center, inner_radius, outer_radius, array, value, mode="set"):
if mode == "add":
self.draw_circle(center, outer_radius, array, value)
self.draw_circle(center, inner_radius, array, -value)
elif mode == "set":
ri, ci, existing = self.draw_circle(center, inner_radius, array, -value)
self.draw_circle(center, outer_radius, array, value)
array[ri, ci] = existing
else:
raise ValueError("draw_annulus mode must be 'set' or 'add' but {} used".format(mode)) | Draws an annulus of specified radius on the input array and fills it with specified value
:param center: a tuple for the center of the annulus
:type center: tuple (x,y)
:param inner_radius: how many pixels in radius the interior empty circle is, where the annulus begins
:type inner_radius: int
:param outer_radius: how many pixels in radius the larger outer circle is, where the annulus ends
:type outer_radius: int
:param array: image to draw annulus on
:type array: size (m,n) numpy array
:param value: what value to fill the annulus with
:type value: float
:param mode: if "set" will assign the circle interior value, if "add" will add the value to the circle interior,
throws exception otherwise
:type mode: string, either "set" or "add"
:return: None; the input array is updated in place with the annulus drawn |
def draw_default(self, inside=5, outside=15):
# fill everything with empty outer space
if 'outer_space' in self.config.solar_class_index:
self.selection_array[:, :] = self.config.solar_class_index['outer_space']
elif 'empty_outer_space' in self.config.solar_class_index:
self.selection_array[:, :] = self.config.solar_class_index['empty_outer_space']
else:
raise ValueError("outer_space or empty_outer_space must be classes with colors.")
# draw the limb label in its location
self.draw_annulus((self.cx, self.cy),
self.sun_radius_pixel - inside,
self.sun_radius_pixel + outside,
self.selection_array,
self.config.solar_class_index['limb'])
# draw quiet sun in its location
self.draw_circle((self.cx, self.cy),
self.sun_radius_pixel - inside,
self.selection_array,
self.config.solar_class_index['quiet_sun']) | Draw suggested sun disk, limb, and empty background
:param inside: how many pixels from the calculated solar disk edge to go inward for the limb
:param outside: how many pixels from the calculated solar disk edge to go outward for the limb
:return: updates the self.selection_array |
def values(self):
self.vals['nfft'] = self.ui.nfftSpnbx.value()
self.vals['window'] = str(self.ui.windowCmbx.currentText()).lower()
self.vals['overlap'] = self.ui.overlapSpnbx.value()
return self.vals | Gets the parameter values
:returns: dict of inputs:
| *'nfft'*: int -- length, in samples, of FFT chunks
| *'window'*: str -- name of window to apply to FFT chunks
| *'overlap'*: float -- percent overlap of windows |
def run(config, max_jobs, output=sys.stdout, job_type='local',
report_type='text', shell='/bin/bash', temp='.metapipe', run_now=False):
if max_jobs is None:
max_jobs = cpu_count()
parser = Parser(config)
try:
command_templates = parser.consume()
except ValueError as e:
raise SyntaxError('Invalid config file. \n%s' % e)
options = '\n'.join(parser.global_options)
queue_type = QUEUE_TYPES[report_type]
pipeline = Runtime(command_templates, queue_type, JOB_TYPES, job_type, max_jobs)
template = env.get_template('output_script.tmpl.sh')
with open(temp, 'wb') as f:
pickle.dump(pipeline, f, 2)
script = template.render(shell=shell,
temp=os.path.abspath(temp), options=options)
if run_now:
output = output if output != sys.stdout else PIPELINE_ALIAS
submit_job = make_submit_job(shell, output, job_type)
submit_job.submit()
try:
f = open(output, 'w')
output = f
except TypeError:
f = None
output.write(script)
if f is not None:
f.close() | Create the metapipe based on the provided input. |
def make_submit_job(shell, output, job_type):
run_cmd = [shell, output]
submit_command = Command(alias=PIPELINE_ALIAS, cmds=run_cmd)
submit_job = get_job(submit_command, job_type)
submit_job.make()
return submit_job | Preps the metapipe main job to be submitted. |
def yaml(modules_to_register: Iterable[Any] = None, classes_to_register: Iterable[Any] = None) -> ruamel.yaml.YAML:
# Define a round-trip yaml object for us to work with. This object should be imported by other modules
# NOTE: "typ" is not a typo. It stands for "type"
yaml = ruamel.yaml.YAML(typ = "rt")
# Register representers and constructors
# Numpy
yaml.representer.add_representer(np.ndarray, numpy_to_yaml)
yaml.constructor.add_constructor("!numpy_array", numpy_from_yaml)
# Register external classes
yaml = register_module_classes(yaml = yaml, modules = modules_to_register)
yaml = register_classes(yaml = yaml, classes = classes_to_register)
return yaml | Create a YAML object for loading a YAML configuration.
Args:
modules_to_register: Modules containing classes to be registered with the YAML object. Default: None.
classes_to_register: Classes to be registered with the YAML object. Default: None.
Returns:
A newly created YAML object, configured as appropriate. |
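A hedged usage sketch of the factory above, round-tripping a numpy array through YAML. The import path `yaml_helpers` is illustrative; substitute the module that actually defines `yaml()`:

```python
import io

import numpy as np
from yaml_helpers import yaml  # illustrative import path

y = yaml()  # round-trip YAML object with the numpy representer/constructor registered
stream = io.StringIO()
y.dump({"weights": np.arange(3)}, stream)
print(stream.getvalue())  # the array is written under the !numpy_array tag

stream.seek(0)
data = y.load(stream)
print(type(data["weights"]), data["weights"])  # back to a numpy.ndarray
```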
def register_classes(yaml: ruamel.yaml.YAML, classes: Optional[Iterable[Any]] = None) -> ruamel.yaml.YAML:
# Validation
if classes is None:
classes = []
# Register the classes
for cls in classes:
logger.debug(f"Registering class {cls} with YAML")
yaml.register_class(cls)
return yaml | Register externally defined classes. |
def register_module_classes(yaml: ruamel.yaml.YAML, modules: Optional[Iterable[Any]] = None) -> ruamel.yaml.YAML:
# Validation
if modules is None:
modules = []
# Extract the classes from the modules
classes_to_register = set()
for module in modules:
module_classes = [member[1] for member in inspect.getmembers(module, inspect.isclass)]
classes_to_register.update(module_classes)
# Register the extracted classes
return register_classes(yaml = yaml, classes = classes_to_register) | Register all classes in the given modules with the YAML object.
This is a simple helper function. |
def numpy_to_yaml(representer: Representer, data: np.ndarray) -> Sequence[Any]:
return representer.represent_sequence(
"!numpy_array",
data.tolist()
) | Write a numpy array to YAML.
It registers the array under the tag ``!numpy_array``.
Use with:
.. code-block:: python
>>> yaml = ruamel.yaml.YAML()
>>> yaml.representer.add_representer(np.ndarray, yaml.numpy_to_yaml)
Note:
We cannot use ``yaml.register_class`` because it won't register the proper type.
(It would register the type of the class, rather than of `numpy.ndarray`). Instead,
we use the above approach to register this method explicitly with the representer. |
def numpy_from_yaml(constructor: Constructor, data: ruamel.yaml.nodes.SequenceNode) -> np.ndarray:
# Construct the contained values so that we properly construct int, float, etc.
# We just leave this to YAML because it already stores this information.
values = [constructor.construct_object(n) for n in data.value]
logger.debug(f"{data}, {values}")
return np.array(values) | Read an array from YAML to numpy.
It reads arrays registered under the tag ``!numpy_array``.
Use with:
.. code-block:: python
>>> yaml = ruamel.yaml.YAML()
>>> yaml.constructor.add_constructor("!numpy_array", yaml.numpy_from_yaml)
Note:
We cannot use ``yaml.register_class`` because it won't register the proper type.
(It would register the type of the class, rather than of `numpy.ndarray`). Instead,
we use the above approach to register this method explicitly with the constructor. |
def enum_to_yaml(cls: Type[T_EnumToYAML], representer: Representer, data: T_EnumToYAML) -> ruamel.yaml.nodes.ScalarNode:
return representer.represent_scalar(
f"!{cls.__name__}",
f"{str(data)}"
) | Encodes YAML representation.
This is a mixin method for writing enum values to YAML. It needs to be added to the enum
as a classmethod. See the module docstring for further information on this approach and how
to implement it.
This method writes whatever is used in the string representation of the YAML value.
Usually, this will be the unique name of the enumeration value. If the name is used,
the corresponding ``EnumFromYAML`` mixin can be used to recreate the value. If the name
isn't used, more care may be necessary, so a ``from_yaml`` method for that particular
enumeration may be necessary.
Note:
This method assumes that the name of the enumeration value should be stored as a scalar node.
Args:
representer: Representation from YAML.
data: Enumeration value to be encoded.
Returns:
Scalar representation of the name of the enumeration value. |
def enum_from_yaml(cls: Type[T_EnumFromYAML], constructor: Constructor, node: ruamel.yaml.nodes.ScalarNode) -> T_EnumFromYAML:
# mypy doesn't like indexing to construct the enumeration.
return cls[node.value] | Decode YAML representation.
This is a mixin method for reading enum values from YAML. It needs to be added to the enum
as a classmethod. See the module docstring for further information on this approach and how
to implement it.
Note:
This method assumes that the name of the enumeration value was stored as a scalar node.
Args:
constructor: Constructor from the YAML object.
node: Scalar node extracted from the YAML being read.
Returns:
The constructed YAML value from the name of the enumerated value. |
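A minimal sketch of how the two mixins might be attached to an enumeration and registered; the `yaml_helpers` import path and the `Color` enum are illustrative, and `__str__` is overridden to return the bare member name so `enum_from_yaml` can rebuild the value by name:

```python
import enum
import io

import ruamel.yaml
from yaml_helpers import enum_to_yaml, enum_from_yaml  # illustrative import path

class Color(enum.Enum):
    red = 1
    blue = 2

    def __str__(self):
        # Store the bare member name so it can be looked up again on load.
        return self.name

    # classmethod descriptors are not turned into enum members, so the mixins
    # can be attached directly inside the class body.
    to_yaml = classmethod(enum_to_yaml)
    from_yaml = classmethod(enum_from_yaml)

yaml = ruamel.yaml.YAML()
yaml.register_class(Color)  # picks up to_yaml/from_yaml automatically

stream = io.StringIO()
yaml.dump({"color": Color.red}, stream)
print(stream.getvalue())                        # color: !Color red
stream.seek(0)
print(yaml.load(stream)["color"] is Color.red)  # True
```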
def is_error(self):
try:
if self._task.is_alive():
if len(self._task.stderr.readlines()) > 0:
self._task.join()
self._write_log()
return True
except AttributeError:
pass
return False | Checks to see if the job errored out. |
def _get_current_ids(self, source=True, meta=True, spectra=True, spectra_annotation=True):
# get the cursor for the database connection
c = self.c
# Get the last uid for the spectra_info table
if source:
c.execute('SELECT max(id) FROM library_spectra_source')
last_id_origin = c.fetchone()[0]
if last_id_origin:
self.current_id_origin = last_id_origin + 1
else:
self.current_id_origin = 1
if meta:
c.execute('SELECT max(id) FROM library_spectra_meta')
last_id_meta = c.fetchone()[0]
if last_id_meta:
self.current_id_meta = last_id_meta + 1
else:
self.current_id_meta = 1
if spectra:
c.execute('SELECT max(id) FROM library_spectra')
last_id_spectra = c.fetchone()[0]
if last_id_spectra:
self.current_id_spectra = last_id_spectra + 1
else:
self.current_id_spectra = 1
if spectra_annotation:
c.execute('SELECT max(id) FROM library_spectra_annotation')
last_id_spectra_annotation = c.fetchone()[0]
if last_id_spectra_annotation:
self.current_id_spectra_annotation = last_id_spectra_annotation + 1
else:
self.current_id_spectra_annotation = 1 | Get the current id for each table in the database
Args:
source (boolean): get the id for the table "library_spectra_source" will update self.current_id_origin
meta (boolean): get the id for the table "library_spectra_meta" will update self.current_id_meta
spectra (boolean): get the id for the table "library_spectra" will update self.current_id_spectra
spectra_annotation (boolean): get the id for the table "library_spectra_annotation" will update
self.current_id_spectra_annotation |
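The next-id lookup used above, shown standalone against an in-memory SQLite table (table name and inserted value are illustrative); `max(id)` comes back as None for an empty table, hence the fallback to 1:

```python
import sqlite3

conn = sqlite3.connect(':memory:')
c = conn.cursor()
c.execute('CREATE TABLE library_spectra_meta (id INTEGER PRIMARY KEY)')
c.execute('INSERT INTO library_spectra_meta (id) VALUES (7)')

c.execute('SELECT max(id) FROM library_spectra_meta')
last_id = c.fetchone()[0]           # None when the table is empty
current_id_meta = last_id + 1 if last_id else 1
print(current_id_meta)              # 8
```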
def _parse_files(self, msp_pth, chunk, db_type, celery_obj=False):
if os.path.isdir(msp_pth):
c = 0
for folder, subs, files in sorted(os.walk(msp_pth)):
for msp_file in sorted(files):
msp_file_pth = os.path.join(folder, msp_file)
if os.path.isdir(msp_file_pth) or not msp_file_pth.lower().endswith(('txt', 'msp')):
continue
print('MSP FILE PATH', msp_file_pth)
self.num_lines = line_count(msp_file_pth)
# each file is processed separately but we want to still process in chunks so we save the number
# of spectra currently being processed with the c variable
with open(msp_file_pth, "r") as f:
c = self._parse_lines(f, chunk, db_type, celery_obj, c)
else:
self.num_lines = line_count(msp_pth)
with open(msp_pth, "r") as f:
self._parse_lines(f, chunk, db_type, celery_obj)
self.insert_data(remove_data=True, db_type=db_type) | Parse the MSP files and insert into database
Args:
msp_pth (str): path to msp file or directory [required]
db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required]
chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required]
celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks
[default False] |
def _parse_lines(self, f, chunk, db_type, celery_obj=False, c=0):
old = 0
for i, line in enumerate(f):
line = line.rstrip()
if i == 0:
old = self.current_id_meta
self._update_libdata(line)
if self.current_id_meta > old:
old = self.current_id_meta
c += 1
if c > chunk:
if celery_obj:
celery_obj.update_state(state='current spectra {}'.format(str(i)),
meta={'current': i, 'total': self.num_lines})
print(self.current_id_meta)
self.insert_data(remove_data=True, db_type=db_type)
self.update_source = False
c = 0
return c | Parse the MSP files and insert into database
Args:
f (file object): the opened file object
db_type (str): The type of database to submit to (either 'sqlite', 'mysql' or 'django_mysql') [required]
chunk (int): Chunks of spectra to parse data (useful to control memory usage) [required]
celery_obj (boolean): If using Django a Celery task object can be used to keep track on ongoing tasks
[default False]
c (int): Number of spectra currently processed (will reset to 0 after that chunk of spectra has been
inserted into the database) |
def get_compound_ids(self):
cursor = self.conn.cursor()
cursor.execute('SELECT inchikey_id FROM metab_compound')
self.conn.commit()
for row in cursor:
if not row[0] in self.compound_ids:
self.compound_ids.append(row[0]) | Extract the current compound ids in the database. Updates the self.compound_ids list |
def _store_compound_info(self):
other_name_l = [name for name in self.other_names if name != self.compound_info['name']]
self.compound_info['other_names'] = ' <#> '.join(other_name_l)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['pubchem_id'], 'cid', 0)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['smiles'], 'smiles', 0)
if not self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['name'], 'name', 0)
if not self.compound_info['inchikey_id']:
print('WARNING, cant get inchi key for ', self.compound_info)
print(self.meta_info)
print('#########################')
self.compound_info['inchikey_id'] = 'UNKNOWN_' + str(uuid.uuid4())
if not self.compound_info['pubchem_id'] and self.compound_info['inchikey_id']:
self._set_inchi_pcc(self.compound_info['inchikey_id'], 'inchikey', 0)
if not self.compound_info['name']:
self.compound_info['name'] = 'unknown name'
if not self.compound_info['inchikey_id'] in self.compound_ids:
self.compound_info_all.append(tuple(self.compound_info.values()) + (
str(datetime.datetime.now()),
str(datetime.datetime.now()),
))
self.compound_ids.append(self.compound_info['inchikey_id']) | Update the compound_info dictionary with the current chunk of compound details
Note that we use the inchikey as the unique identifier. If we can't find an appropriate inchikey we just use
a random string (uuid4) prefixed with UNKNOWN_ |
def _store_meta_info(self):
# In the mass bank msp files, sometimes the precursor_mz is missing but we have the neutral mass and
# the precursor_type (e.g. adduct) so we can calculate the precursor_mz
if not self.meta_info['precursor_mz'] and self.meta_info['precursor_type'] and \
self.compound_info['exact_mass']:
self.meta_info['precursor_mz'] = get_precursor_mz(float(self.compound_info['exact_mass']),
self.meta_info['precursor_type'])
if not self.meta_info['polarity']:
# have to do special check for polarity (as sometimes gets missed)
m = re.search('^\[.*\](\-|\+)', self.meta_info['precursor_type'], re.IGNORECASE)
if m:
polarity = m.group(1).strip()
if polarity == '+':
self.meta_info['polarity'] = 'positive'
elif polarity == '-':
self.meta_info['polarity'] = 'negative'
if not self.meta_info['accession']:
self.meta_info['accession'] = 'unknown accession'
self.meta_info_all.append(
(str(self.current_id_meta),) +
tuple(self.meta_info.values()) +
(str(self.current_id_origin), self.compound_info['inchikey_id'],)
) | Update the meta dictionary with the current chunk of meta data details |
def _parse_spectra_annotation(self, line):
if re.match('^PK\$NUM_PEAK(.*)', line, re.IGNORECASE):
self.start_spectra_annotation = False
return
saplist = line.split()
sarow = (
self.current_id_spectra_annotation,
float(saplist[self.spectra_annotation_indexes['m/z']]) if 'm/z' in self.spectra_annotation_indexes else None,
saplist[self.spectra_annotation_indexes[
'tentative_formula']] if 'tentative_formula' in self.spectra_annotation_indexes else None,
float(saplist[self.spectra_annotation_indexes[
'mass_error(ppm)']]) if 'mass_error(ppm)' in self.spectra_annotation_indexes else None,
self.current_id_meta)
self.spectra_annotation_all.append(sarow)
self.current_id_spectra_annotation += 1 | Parse and store the spectral annotation details |
def _parse_spectra(self, line):
if line in ['\n', '\r\n', '//\n', '//\r\n', '', '//']:
self.start_spectra = False
self.current_id_meta += 1
self.collect_meta = True
return
splist = line.split()
if len(splist) > 2 and not self.ignore_additional_spectra_info:
additional_info = ''.join(map(str, splist[2:len(splist)]))
else:
additional_info = ''
srow = (
self.current_id_spectra, float(splist[0]), float(splist[1]), additional_info,
self.current_id_meta)
self.spectra_all.append(srow)
self.current_id_spectra += 1 | Parse and store the spectral details |
def _set_inchi_pcc(self, in_str, pcp_type, elem):
if not in_str:
return 0
try:
pccs = pcp.get_compounds(in_str, pcp_type)
except pcp.BadRequestError as e:
print(e)
return 0
except pcp.TimeoutError as e:
print(e)
return 0
except pcp.ServerError as e:
print(e)
return 0
except URLError as e:
print(e)
return 0
except BadStatusLine as e:
print(e)
return 0
if pccs:
pcc = pccs[elem]
self.compound_info['inchikey_id'] = pcc.inchikey
self.compound_info['pubchem_id'] = pcc.cid
self.compound_info['molecular_formula'] = pcc.molecular_formula
self.compound_info['molecular_weight'] = pcc.molecular_weight
self.compound_info['exact_mass'] = pcc.exact_mass
self.compound_info['smiles'] = pcc.canonical_smiles
if len(pccs) > 1:
print('WARNING, multiple compounds for ', self.compound_info) | Check pubchem compounds via API for both an inchikey and any available compound details |
def _get_other_names(self, line):
m = re.search(self.compound_regex['other_names'][0], line, re.IGNORECASE)
if m:
self.other_names.append(m.group(1).strip()) | Parse and extract any other names that might be recorded for the compound
Args:
line (str): line of the msp file |
def _parse_meta_info(self, line):
if self.mslevel:
self.meta_info['ms_level'] = self.mslevel
if self.polarity:
self.meta_info['polarity'] = self.polarity
for k, regexes in six.iteritems(self.meta_regex):
for reg in regexes:
m = re.search(reg, line, re.IGNORECASE)
if m:
self.meta_info[k] = m.group(1).strip() | Parse and extract all meta data by looping through the dictionary of meta_info regexes.
Updates self.meta_info
Args:
line (str): line of the msp file |
def _parse_compound_info(self, line):
for k, regexes in six.iteritems(self.compound_regex):
for reg in regexes:
if self.compound_info[k]:
continue
m = re.search(reg, line, re.IGNORECASE)
if m:
self.compound_info[k] = m.group(1).strip()
self._get_other_names(line) | Parse and extract all compound data by looping through the dictionary of compound_info regexes.
Updates self.compound_info
Args:
line (str): line of the msp file |
def line(line_def, **kwargs):
def replace(s):
return "(%s)" % ansi.aformat(s.group()[1:], attrs=["bold", ])
return ansi.aformat(
re.sub('@.?', replace, line_def),
**kwargs) | Highlights each character prefixed with '@' in the line definition |
def try_and_error(*funcs):
def validate(value):
exc = None
for func in funcs:
try:
return func(value)
except (ValueError, TypeError) as e:
exc = e
raise exc
return validate | Apply multiple validation functions
Parameters
----------
``*funcs``
Validation functions to test
Returns
-------
function |
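A small usage sketch with stand-in validators (the real module composes its own `validate_*` functions); the combined validator returns the first result that succeeds and re-raises the last error otherwise:

```python
def validate_int(value):
    return int(value)

def validate_none_stub(value):
    # Stand-in for the module's validate_none.
    if value is None or (isinstance(value, str) and value.lower() == 'none'):
        return None
    raise ValueError('Could not convert "%s" to None' % value)

validate_int_or_none = try_and_error(validate_none_stub, validate_int)
print(validate_int_or_none('3'))     # 3
print(validate_int_or_none('none'))  # None
# validate_int_or_none('abc') would raise the ValueError from the last validator
```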
def validate_text(value):
possible_transform = ['axes', 'fig', 'data']
validate_transform = ValidateInStrings('transform', possible_transform,
True)
tests = [validate_float, validate_float, validate_str,
validate_transform, dict]
if isinstance(value, six.string_types):
xpos, ypos = rcParams['texts.default_position']
return [(xpos, ypos, value, 'axes', {'ha': 'right'})]
elif isinstance(value, tuple):
value = [value]
try:
value = list(value)[:]
except TypeError:
raise ValueError("Value must be string or list of tuples!")
for i, val in enumerate(value):
try:
val = tuple(val)
except TypeError:
raise ValueError(
"Text must be an iterable of the form "
"(x, y, s[, trans, params])!")
if len(val) < 3:
raise ValueError(
"Text tuple must at least be like [x, y, s], with floats x, "
"y and string s!")
elif len(val) == 3 or isinstance(val[3], dict):
val = list(val)
val.insert(3, 'data')
if len(val) == 4:
val += [{}]
val = tuple(val)
if len(val) > 5:
raise ValueError(
"Text tuple must not be longer then length 5. It can be "
"like (x, y, s[, trans, params])!")
value[i] = (validate(x) for validate, x in zip(tests, val))
return value | Validate a text formatoption
Parameters
----------
value: see :attr:`psyplot.plotter.labelplotter.text`
Raises
------
ValueError |
def validate_none(b):
if isinstance(b, six.string_types):
b = b.lower()
if b is None or b == 'none':
return None
else:
raise ValueError('Could not convert "%s" to None' % b) | Validate that None is given
Parameters
----------
b: {None, 'none'}
None or string (the case is ignored)
Returns
-------
None
Raises
------
ValueError |
def validate_axiscolor(value):
validate = try_and_error(validate_none, validate_color)
possible_keys = {'right', 'left', 'top', 'bottom'}
try:
value = dict(value)
false_keys = set(value) - possible_keys
if false_keys:
raise ValueError("Wrong keys (%s)!" % (', '.join(false_keys)))
for key, val in value.items():
value[key] = validate(val)
except:
value = dict(zip(possible_keys, repeat(validate(value))))
return value | Validate a dictionary containing axiscolor definitions
Parameters
----------
value: dict
see :attr:`psyplot.plotter.baseplotter.axiscolor`
Returns
-------
dict
Raises
------
ValueError |
def validate_cbarpos(value):
patt = 'sh|sv|fl|fr|ft|fb|b|r'
if value is True:
value = {'b'}
elif not value:
value = set()
elif isinstance(value, six.string_types):
for s in re.finditer('[^%s]+' % patt, value):
warn("Unknown colorbar position %s!" % s.group(), RuntimeWarning)
value = set(re.findall(patt, value))
else:
value = validate_stringset(value)
for s in (s for s in value
if not re.match(patt, s)):
warn("Unknown colorbar position %s!" % s)
value.remove(s)
return value | Validate a colorbar position
Parameters
----------
value: bool or str
A string can be a combination of 'sh|sv|fl|fr|ft|fb|b|r'
Returns
-------
set
set of strings with the chosen colorbar positions
Raises
------
ValueError |
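For example (assuming `validate_cbarpos` is imported from this module):

```python
print(validate_cbarpos(True))    # {'b'}
print(validate_cbarpos('flfr'))  # {'fl', 'fr'}
print(validate_cbarpos(False))   # set()
```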
def validate_cmap(val):
from matplotlib.colors import Colormap
try:
return validate_str(val)
except ValueError:
if not isinstance(val, Colormap):
raise ValueError(
"Could not find a valid colormap!")
return val | Validate a colormap
Parameters
----------
val: str or :class:`mpl.colors.Colormap`
Returns
-------
str or :class:`mpl.colors.Colormap`
Raises
------
ValueError |
def validate_cmaps(cmaps):
cmaps = {validate_str(key): validate_colorlist(val) for key, val in six.iteritems(cmaps)}
for key, val in six.iteritems(cmaps):
cmaps.setdefault(key + '_r', val[::-1])
return cmaps | Validate a dictionary of color lists
Parameters
----------
cmaps: dict
a mapping from a colormap name to a list of colors
Raises
------
ValueError
If one of the values in `cmaps` is not a color list
Notes
-----
For all items (listname, list) in `cmaps`, the reversed list is
automatically inserted with the ``listname + '_r'`` key. |
def validate_lineplot(value):
if value is None:
return value
elif isinstance(value, six.string_types):
return six.text_type(value)
else:
value = list(value)
for i, v in enumerate(value):
if v is None:
pass
elif isinstance(v, six.string_types):
value[i] = six.text_type(v)
else:
raise ValueError('Expected None or string, found %s' % (v, ))
return value | Validate the value for the LinePlotter.plot formatoption
Parameters
----------
value: None, str or list with mixture of both
The value to validate |
def validate_err_calc(val):
try:
val = validate_float(val)
except (ValueError, TypeError):
pass
else:
if val <= 100 and val >= 0:
return val
raise ValueError("Percentiles for the error calculation must lie "
"between 0 and 100, not %s" % val)
try:
val = ValidateList(float, 2)(val)
except (ValueError, TypeError):
pass
else:
if all((v <= 100 and v >= 0) for v in val):
return val
raise ValueError("Percentiles for the error calculation must lie "
"between 0 and 100, not %s" % val)
try:
val = validate_str(val)
except ValueError:
pass
else:
if 'std' not in val:
raise ValueError(
'A string for the error calculation must contain std!')
return val | Validation function for the
:attr:`psy_simple.plotter.FldmeanPlotter.err_calc` formatoption |
def visit_GpxModel(self, gpx_model, *args, **kwargs):
result = OrderedDict()
put_scalar = lambda name, json_name=None: self.optional_attribute_scalar(result, gpx_model, name, json_name)
put_list = lambda name, json_name=None: self.optional_attribute_list(result, gpx_model, name, json_name)
put_scalar('creator')
put_scalar('metadata')
put_list('waypoints')
put_list('routes')
put_list('tracks')
put_list('extensions')
return result | Render a GPXModel as a single JSON structure. |
def visit_Metadata(self, metadata, *args, **kwargs):
result = OrderedDict()
put_scalar = lambda name, json_name=None: self.optional_attribute_scalar(result, metadata, name, json_name)
put_list = lambda name, json_name=None: self.optional_attribute_list(result, metadata, name, json_name)
put_scalar('name')
put_scalar('description')
put_scalar('author')
put_scalar('copyright')
put_list('links')
put_scalar('time')
put_scalar('keywords')
put_scalar('bounds')
put_list('extensions')
return result | Render GPX Metadata as a single JSON structure. |
def has_option(section, name):
cfg = ConfigParser.SafeConfigParser({"working_dir": "/tmp", "debug": "0"})
cfg.read(CONFIG_LOCATIONS)
return cfg.has_option(section, name) | Wrapper around ConfigParser's ``has_option`` method. |
def get(section, name):
cfg = ConfigParser.SafeConfigParser({"working_dir": "/tmp", "debug": "0"})
cfg.read(CONFIG_LOCATIONS)
val = cfg.get(section, name)
return val.strip("'").strip('"') | Wrapper around ConfigParser's ``get`` method. |
def run(**options):
with Dotfile(options) as conf:
if conf['context'] is None:
msg = "No context file has been provided"
LOGGER.error(msg)
raise RuntimeError(msg)
if not os.path.exists(conf['context_path']):
msg = "Context file {} not found".format(conf['context_path'])
LOGGER.error(msg)
raise RuntimeError(msg)
LOGGER.info(
(
"{{dockerstache}}: In: {}\n"
"{{dockerstache}}: Out: {}\n"
"{{dockerstache}}: Context: {}\n"
"{{dockerstache}}: Defaults: {}\n"
).format(conf['input'], conf['output'], conf['context'], conf['defaults'])
)
context = Context(conf['context'], conf['defaults'])
context.load()
if 'extend_context' in options:
LOGGER.info("{{dockerstache}} Extended context provided")
context.update(options['extend_context'])
process_templates(
conf['input'],
conf['output'],
context
)
if conf['inclusive']:
process_copies(
conf['input'],
conf['output'],
conf['exclude']
)
return dict(conf) | _run_
Run the dockerstache process to render templates
based on the options provided.
If extend_context is passed in options, it will be used to
extend the context with the contents of the dictionary provided,
via context.update(extend_context) |
def make_key(table_name, objid):
key = datastore.Key()
path = key.path_element.add()
path.kind = table_name
path.name = str(objid)
return key | Create an object key for storage. |
def write_rec(table_name, objid, data, index_name_values):
with DatastoreTransaction() as tx:
entity = tx.get_upsert()
entity.key.CopyFrom(make_key(table_name, objid))
prop = entity.property.add()
prop.name = 'id'
prop.value.string_value = objid
prop = entity.property.add()
prop.name = 'value'
prop.value.string_value = data
for name, val in index_name_values:
prop = entity.property.add()
prop.name = name
prop.value.string_value = str(val) | Write (upsert) a record using a transaction. |
def extract_entity(found):
obj = dict()
for prop in found.entity.property:
obj[prop.name] = prop.value.string_value
return obj | Copy found entity to a dict. |
def read_rec(table_name, objid):
req = datastore.LookupRequest()
req.key.extend([make_key(table_name, objid)])
for found in datastore.lookup(req).found:
yield extract_entity(found) | Generator that yields keyed recs from store. |
def read_by_indexes(table_name, index_name_values=None):
req = datastore.RunQueryRequest()
query = req.query
query.kind.add().name = table_name
if not index_name_values:
index_name_values = []
for name, val in index_name_values:
queryFilter = query.filter.property_filter
queryFilter.property.name = name
queryFilter.operator = datastore.PropertyFilter.EQUAL
queryFilter.value.string_value = str(val)
loop_its = 0
have_more = True
while have_more:
resp = datastore.run_query(req)
found_something = False
for found in resp.batch.entity_result:
yield extract_entity(found)
found_something = True
if not found_something:
# This is a guard against bugs or excessive looping - as long we
# can keep yielding records we'll continue to execute
loop_its += 1
if loop_its > 5:
raise ValueError("Exceeded the excessive query threshold")
if resp.batch.more_results != datastore.QueryResultBatch.NOT_FINISHED:
have_more = False
else:
have_more = True
end_cursor = resp.batch.end_cursor
query.start_cursor.CopyFrom(end_cursor) | Index reader. |
def delete_table(table_name):
to_delete = [
make_key(table_name, rec['id'])
for rec in read_by_indexes(table_name, [])
]
with DatastoreTransaction() as tx:
tx.get_commit_req().mutation.delete.extend(to_delete) | Mainly for testing. |
def get_commit_req(self):
if not self.commit_req:
self.commit_req = datastore.CommitRequest()
self.commit_req.transaction = self.tx
return self.commit_req | Lazy commit request getter. |
def find_one(self, cls, id):
db_result = None
for rec in read_rec(cls.get_table_name(), id):
db_result = rec
break # Only read the first returned - which should be all we get
if not db_result:
return None
obj = cls.from_data(db_result['value'])
return obj | Required functionality. |
def find_all(self, cls):
final_results = []
for db_result in read_by_indexes(cls.get_table_name(), []):
obj = cls.from_data(db_result['value'])
final_results.append(obj)
return final_results | Required functionality. |
def find_by_index(self, cls, index_name, value):
table_name = cls.get_table_name()
index_name_vals = [(index_name, value)]
final_results = []
for db_result in read_by_indexes(table_name, index_name_vals):
obj = cls.from_data(db_result['value'])
final_results.append(obj)
return final_results | Required functionality. |
def save(self, obj):
if not obj.id:
obj.id = uuid()
index_names = obj.__class__.index_names() or []
index_dict = obj.indexes() or {}
index_name_values = [
(key, index_dict.get(key, ''))
for key in index_names
]
write_rec(
obj.__class__.get_table_name(),
obj.id,
obj.to_data(),
index_name_values
) | Required functionality. |
def call(command, stdin=None, stdout=subprocess.PIPE, env=os.environ, cwd=None,
shell=False, output_log_level=logging.INFO, sensitive_info=False):
if not sensitive_info:
logger.debug("calling command: %s" % command)
else:
logger.debug("calling command with sensitive information")
try:
args = command if shell else whitespace_smart_split(command)
kw = {}
if not shell and not which(args[0], cwd=cwd):
raise CommandMissingException(args[0])
if shell:
kw['shell'] = True
process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=stdout,
stderr=subprocess.STDOUT, env=env, cwd=cwd,
**kw)
output = process.communicate(input=stdin)[0]
if output is not None:
try:
logger.log(output_log_level, output.decode('utf-8'))
except UnicodeDecodeError:
pass
return (process.returncode, output)
except OSError:
e = sys.exc_info()[1]
if not sensitive_info:
logger.exception("Error running command: %s" % command)
logger.error("Root directory: %s" % cwd)
if stdin:
logger.error("stdin: %s" % stdin)
raise e | Better, smarter call logic |
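A hedged usage sketch of the helper above (`call` is assumed to be imported from this module; the command is illustrative), showing the `(returncode, output)` tuple it returns:

```python
# Run a simple command and inspect the (returncode, output) tuple.
returncode, output = call("echo hello world")
if returncode != 0:
    raise RuntimeError("command failed")
print(output.decode("utf-8").strip())  # -> hello world
```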
def whitespace_smart_split(command):
return_array = []
s = ""
in_double_quotes = False
escape = False
for c in command:
if c == '"':
if in_double_quotes:
if escape:
s += c
escape = False
else:
s += c
in_double_quotes = False
else:
in_double_quotes = True
s += c
else:
if in_double_quotes:
if c == '\\':
escape = True
s += c
else:
escape = False
s += c
else:
if c == ' ':
return_array.append(s)
s = ""
else:
s += c
if s != "":
return_array.append(s)
return return_array | Split a command by whitespace, taking care to not split on
whitespace within quotes.
>>> whitespace_smart_split("test this \\\"in here\\\" again")
['test', 'this', '"in here"', 'again'] |
def skip(stackframe=1):
def trace(frame, event, args):
raise ContextSkipped
sys.settrace(lambda *args, **kwargs: None)
frame = sys._getframe(stackframe + 1)
frame.f_trace = trace | Must be called from within `__enter__()`. Performs some magic to have a
#ContextSkipped exception be raised the moment the with context is entered.
The #ContextSkipped must then be handled in `__exit__()` to suppress the
propagation of the exception.
> Important: This function does not raise an exception by itself, thus
> the `__enter__()` method will continue to execute after using this function. |
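A hedged sketch of the context-manager pattern the docstring describes; `skip` and `ContextSkipped` are assumed to come from the module above, and the class name is illustrative:

```python
class optional_section(object):
    """Context manager whose body only runs when `enabled` is true."""

    def __init__(self, enabled):
        self.enabled = enabled

    def __enter__(self):
        if not self.enabled:
            # Arranges for ContextSkipped to be raised as soon as the
            # with-block body starts executing.
            skip()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Suppress only the marker exception; everything else propagates.
        return exc_type is not None and issubclass(exc_type, ContextSkipped)

with optional_section(enabled=False):
    print("this body is skipped entirely")
print("execution continues here")
```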
def sync(self):
phase = _get_phase(self._formula_instance)
self.logger.info("%s %s..." % (phase.verb.capitalize(), self.feature_name))
message = "...finished %s %s." % (phase.verb, self.feature_name)
result = getattr(self, phase.name)()
if result or phase in (PHASE.INSTALL, PHASE.REMOVE):
self.logger.info(message)
else:
self.logger.debug(message)
return result | execute the steps required to have the
feature end with the desired state. |
def linear_insert(self, item, priority):
with self.lock:
self_data = self.data
rotate = self_data.rotate
maxlen = self._maxlen
length = len(self_data)
count = length
# in practice, this is better than doing a rotate(-1) every
# loop and getting self.data[0] each time only because deque
# implements a very efficient iterator in C
for i in self_data:
if priority > i[1]:
break
count -= 1
rotate(-count)
self_data.appendleft((item, priority))
rotate(length-count)
try:
self.items[item] += 1
except TypeError:
self.items[repr(item)] += 1
if maxlen is not None and maxlen < len(self_data):
self._poplast() | Linear search. Performance is O(n^2). |
def binary_insert(self, item, priority):
with self.lock:
self_data = self.data
rotate = self_data.rotate
maxlen = self._maxlen
length = len(self_data)
index = 0
min = 0
max = length - 1
while max - min > 10:
mid = (min + max) // 2
# If index in 1st half of list
if priority > self_data[mid][1]:
max = mid - 1
# If index in 2nd half of list
else:
min = mid + 1
for i in range(min, max + 1):
if priority > self_data[i][1]:
index = i
break
elif i == max:
index = max + 1
shift = length - index
# Never shift more than half length of depq
if shift > length // 2:
shift = length % shift
rotate(-shift)
self_data.appendleft((item, priority))
rotate(shift)
else:
rotate(shift)
self_data.append((item, priority))
rotate(-shift)
try:
self.items[item] += 1
except TypeError:
self.items[repr(item)] += 1
if maxlen is not None and maxlen < len(self_data):
self._poplast() | Traditional binary search. Performance: O(n log n) |
def isloaded(self, name):
if name is None:
return True
if isinstance(name, str):
return (name in [x.__module__ for x in self])
if isinstance(name, Iterable):
return set(name).issubset([x.__module__ for x in self])
return False | Checks if given hook module has been loaded
Args:
name (str): The name of the module to check
Returns:
bool. The return code::
True -- Loaded
False -- Not Loaded |
def hook(self, function, dependencies=None):
if not isinstance(dependencies, (Iterable, type(None), str)):
raise TypeError("Invalid list of dependencies provided!")
# Tag the function with its dependencies
if not hasattr(function, "__deps__"):
function.__deps__ = dependencies
# If a module is loaded before all its dependencies are loaded, put
# it in _later list and don't load yet
if self.isloaded(function.__deps__):
self.append(function)
else:
self._later.append(function)
# After each module load, retry to resolve dependencies
for ext in self._later:
if self.isloaded(ext.__deps__):
self._later.remove(ext)
self.hook(ext) | Tries to load a hook
Args:
function (func): Function that will be called when the event is called
Kwargs:
dependencies (str): String or Iterable with modules whose hooks should be called before this one
Raises:
:class:TypeError
Note that the dependencies are module-wide, that means that if
`parent.foo` and `parent.bar` are both subscribed to `example` event
and `child` enumerates `parent` as a dependency, **both** `foo` and `bar`
must be called in order for the dependency to get resolved. |
def parse_from_json(json_str):
try:
message_dict = json.loads(json_str)
except ValueError:
raise ParseError("Mal-formed JSON input.")
upload_keys = message_dict.get('uploadKeys', False)
if upload_keys is False:
raise ParseError(
"uploadKeys does not exist. At minimum, an empty array is required."
)
elif not isinstance(upload_keys, list):
raise ParseError(
"uploadKeys must be an array object."
)
upload_type = message_dict['resultType']
try:
if upload_type == 'orders':
return orders.parse_from_dict(message_dict)
elif upload_type == 'history':
return history.parse_from_dict(message_dict)
else:
raise ParseError(
'Unified message has unknown upload_type: %s' % upload_type)
except TypeError as exc:
# MarketOrder and HistoryEntry both raise TypeError exceptions if
# invalid input is encountered.
raise ParseError(exc.message) | Given a Unified Uploader message, parse the contents and return a
MarketOrderList or MarketHistoryList instance.
:param str json_str: A Unified Uploader message as a JSON string.
:rtype: MarketOrderList or MarketHistoryList
:raises: ParseError when invalid JSON is passed in. |
def encode_to_json(order_or_history):
if isinstance(order_or_history, MarketOrderList):
return orders.encode_to_json(order_or_history)
elif isinstance(order_or_history, MarketHistoryList):
return history.encode_to_json(order_or_history)
else:
raise Exception("Must be one of MarketOrderList or MarketHistoryList.") | Given an order or history entry, encode it to JSON and return.
:type order_or_history: MarketOrderList or MarketHistoryList
:param order_or_history: A MarketOrderList or MarketHistoryList instance to
encode to JSON.
:rtype: str
:return: The encoded JSON string. |
def event_subscriber(event):
def wrapper(method):
Registry.register_event(event.name, event, method)
return method
return wrapper | Register a method, which gets called when this event triggers.
:param event: the event to register the decorator method on. |
def dispatch_event(event, subject='id'):
def wrapper(method):
def inner_wrapper(*args, **kwargs):
resource = method(*args, **kwargs)
if isinstance(resource, dict):
subject_ = resource.get(subject)
data = resource
else:
subject_ = getattr(resource, subject)
data = resource.__dict__
event(subject_, data).dispatch()
return resource
return inner_wrapper
return wrapper | Dispatch an event when the decorated method is called.
:param event: the event class to instantiate and dispatch.
:param subject: the attribute or key name used to get the subject. |
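A self-contained sketch of the decorator above with a minimal stand-in event class (the real library presumably supplies its own event base); only the `(subject, data)` construction and `dispatch()` call matter here:

```python
class UserCreated(object):
    """Illustrative event: built with (subject, data) and dispatched immediately."""

    def __init__(self, subject, data):
        self.subject = subject
        self.data = data

    def dispatch(self):
        print('dispatched %s for subject %s' % (self.__class__.__name__, self.subject))

class UserService(object):
    @dispatch_event(UserCreated, subject='id')
    def create_user(self, name):
        # Returning a dict: the decorator reads resource['id'] as the subject
        # and passes the whole dict along as the event payload.
        return {'id': 42, 'name': name}

UserService().create_user('ada')  # prints: dispatched UserCreated for subject 42
```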
def add(self, classifier, threshold, begin=None, end=None):
boosted_machine = bob.learn.boosting.BoostedMachine()
if begin is None: begin = 0
if end is None: end = len(classifier.weak_machines)
for i in range(begin, end):
boosted_machine.add_weak_machine(classifier.weak_machines[i], classifier.weights[i])
self.cascade.append(boosted_machine)
self.thresholds.append(threshold)
self._indices() | Adds a new strong classifier with the given threshold to the cascade.
**Parameters:**
classifier : :py:class:`bob.learn.boosting.BoostedMachine`
A strong classifier to add
``threshold`` : float
The classification threshold for this cascade step
``begin``, ``end`` : int or ``None``
If specified, only the weak machines with the indices ``range(begin,end)`` will be added. |
def create_from_boosted_machine(self, boosted_machine, classifiers_per_round, classification_thresholds=-5.):
indices = list(range(0, len(boosted_machine.weak_machines), classifiers_per_round))
if indices[-1] != len(boosted_machine.weak_machines): indices.append(len(boosted_machine.weak_machines))
self.cascade = []
self.indices = []
for i in range(len(indices)-1):
machine = bob.learn.boosting.BoostedMachine()
for index in range(indices[i], indices[i+1]):
machine.add_weak_machine(boosted_machine.weak_machines[index], boosted_machine.weights[index, 0])
self.cascade.append(machine)
if isinstance(classification_thresholds, (int, float)):
self.thresholds = [classification_thresholds] * len(self.cascade)
else:
self.thresholds = classification_thresholds | Creates this cascade from the given boosted machine, by simply splitting off strong classifiers that have classifiers_per_round weak classifiers.
**Parameters:**
``boosted_machine`` : :py:class:`bob.learn.boosting.BoostedMachine`
The strong classifier to split into a regular cascade.
``classifiers_per_round`` : int
The number of classifiers that each cascade step should contain.
``classification_thresholds`` : float or [float]
A single threshold that will be applied in all rounds of the cascade, or a list of per-round thresholds. |
def generate_boosted_machine(self):
strong = bob.learn.boosting.BoostedMachine()
for machine, index in zip(self.cascade, self.indices):
weak = machine.weak_machines
weights = machine.weights
for i in range(len(weak)):
strong.add_weak_machine(weak[i], weights[i])
return strong | generate_boosted_machine() -> strong
Creates a single strong classifier from this cascade by concatenating all strong classifiers.
**Returns:**
``strong`` : :py:class:`bob.learn.boosting.BoostedMachine`
The strong classifier as a combination of all classifiers in this cascade. |
def save(self, hdf5):
# write the cascade to file
hdf5.set("Thresholds", self.thresholds)
for i in range(len(self.cascade)):
hdf5.create_group("Classifier_%d" % (i+1))
hdf5.cd("Classifier_%d" % (i+1))
self.cascade[i].save(hdf5)
hdf5.cd("..")
hdf5.create_group("FeatureExtractor")
hdf5.cd("FeatureExtractor")
self.extractor.save(hdf5)
hdf5.cd("..") | Saves this cascade into the given HDF5 file.
**Parameters:**
``hdf5`` : :py:class:`bob.io.base.HDF5File`
An HDF5 file open for writing |
def load(self, hdf5):
# write the cascade to file
self.thresholds = hdf5.read("Thresholds")
self.cascade = []
for i in range(len(self.thresholds)):
hdf5.cd("Classifier_%d" % (i+1))
self.cascade.append(bob.learn.boosting.BoostedMachine(hdf5))
hdf5.cd("..")
hdf5.cd("FeatureExtractor")
self.extractor = FeatureExtractor(hdf5)
hdf5.cd("..")
self._indices() | Loads this cascade from the given HDF5 file.
**Parameters:**
``hdf5`` : :py:class:`bob.io.base.HDF5File`
An HDF5 file open for reading |
def check(ctx, repository, config):
ctx.obj = Repo(repository=repository, config=config) | Check commits. |
def message(obj, commit='HEAD', skip_merge_commits=False):
from ..kwalitee import check_message
options = obj.options
repository = obj.repository
if options.get('colors') is not False:
colorama.init(autoreset=True)
reset = colorama.Style.RESET_ALL
yellow = colorama.Fore.YELLOW
green = colorama.Fore.GREEN
red = colorama.Fore.RED
else:
reset = yellow = green = red = ''
try:
sha = 'oid'
commits = _pygit2_commits(commit, repository)
except ImportError:
try:
sha = 'hexsha'
commits = _git_commits(commit, repository)
except ImportError:
click.echo('To use this feature, please install pygit2. '
'GitPython will also work but is not recommended '
'(python <= 2.7 only).',
file=sys.stderr)
return 2
template = '{0}commit {{commit.{1}}}{2}\n\n'.format(yellow, sha, reset)
template += '{message}{errors}'
count = 0
ident = ' '
re_line = re.compile('^', re.MULTILINE)
for commit in commits:
if skip_merge_commits and _is_merge_commit(commit):
continue
message = commit.message
errors = check_message(message, **options)
message = re.sub(re_line, ident, message)
if errors:
count += 1
errors.insert(0, red)
else:
errors = [green, 'Everything is OK.']
errors.append(reset)
click.echo(template.format(commit=commit,
message=message.encode('utf-8'),
errors='\n'.join(errors)))
if min(count, 1):
raise click.Abort | Check the messages of the commits. |
def get_obj_subcmds(obj):
subcmds = []
for label in dir(obj.__class__):
if label.startswith("_"):
continue
if isinstance(getattr(obj.__class__, label, False), property):
continue
rvalue = getattr(obj, label)
if not callable(rvalue) or not is_cmd(rvalue):
continue
if isinstance(obj, types.MethodType) and \
label in ("im_func", "im_self", "im_class"):
continue
## potential command
command_name = getattr(rvalue, "command_name",
label[:-1] if label.endswith("_") else
label)
subcmds.append((command_name, rvalue))
return OrderedDict(subcmds) | Fetch actions from callable attributes which are commands.
Callable must have their attribute 'command' set to True to
be recognised by this lookup.
Please consider using the decorator ``@cmd`` to declare your
subcommands in classes for instance. |
def get_module_resources(mod):
path = os.path.dirname(os.path.realpath(mod.__file__))
prefix = kf.basename(mod.__file__, (".py", ".pyc"))
if not os.path.exists(mod.__file__):
import pkg_resources
for resource_name in pkg_resources.resource_listdir(mod.__name__, ''):
if resource_name.startswith("%s_" % prefix) and resource_name.endswith(".py"):
module_name, _ext = os.path.splitext(kf.basename(resource_name))
yield module_name
for f in glob.glob(os.path.join(path, '%s_*.py' % prefix)):
module_name, _ext = os.path.splitext(kf.basename(f))
yield module_name | Return probed submodule names from the given module |