code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def show_slug_with_level(context, page, lang=None, fallback=True):
    """Display the slug with level of a page for the given language.

    :param context: template context (used for the default language)
    :param page: page instance or identifier accepted by
        get_page_from_string_or_id
    :param lang: language code; falls back to the context / settings default
    :param fallback: unused here, kept for template-tag API compatibility
    :return: dict with the rendered content, or '' if the page is unknown
    """
    lang = lang or context.get('lang', pages_settings.PAGE_DEFAULT_LANGUAGE)
    page = get_page_from_string_or_id(page, lang)
    if page:
        return {'content': page.slug_with_level(lang)}
    return ''
Display slug with level by language.
def show_revisions(context, page, content_type, lang=None):
    """Render the last 10 revisions of a page content.

    Returns ``{'revisions': None}`` when revisions are disabled or there
    are fewer than two revisions to show.
    """
    if not pages_settings.PAGE_CONTENT_REVISION:
        return {'revisions': None}
    revisions = Content.objects.filter(
        page=page, language=lang, type=content_type,
    ).order_by('-creation_date')
    if len(revisions) < 2:
        return {'revisions': None}
    return {'revisions': revisions[:10]}
Render the last 10 revisions of a page content with a list using the ``pages/revisions.html`` template
def do_videoplaceholder(parser, token):
    """Parse the videoplaceholder template tag into its node."""
    name, params = parse_placeholder(parser, token)
    node = VideoPlaceholderNode(name, **params)
    return node
Parse the videoplaceholder template tag.
def do_get_pages_with_tag(parser, token):
    """Return pages with the given tag.

    Syntax::

        {% get_pages_with_tag <tag name> as <varname> %}

    :raises TemplateSyntaxError: on a wrong number of arguments or a
        missing ``as`` keyword
    """
    bits = token.split_contents()
    if len(bits) != 4:
        raise TemplateSyntaxError('%r expects 2 arguments' % bits[0])
    if bits[-2] != 'as':
        raise TemplateSyntaxError(
            '%r expects "as" as the second last argument' % bits[0])
    tag = parser.compile_filter(bits[1])
    # fix: the original assigned varname = bits[-1] twice; one assignment
    # removed, behavior unchanged
    varname = bits[-1]
    return GetPagesWithTagNode(tag, varname)
Return Pages with given tag Syntax:: {% get_pages_with_tag <tag name> as <varname> %} Example use: {% get_pages_with_tag "footer" as pages %}
def fix_raw_path(path):
    """Normalize a raw path.

    Collapses repeated separators and makes sure folder paths end
    with a trailing '/'.

    :param path: path to fix
    :return: normalized path
    """
    doubled = PATH_SEPARATOR * 2
    while doubled in path:  # collapse until no double separator remains
        path = path.replace(doubled, PATH_SEPARATOR)
    if is_folder(path) and not path.endswith("/"):
        path += "/"
    return path
Prettify name of path :param path: path to fix :return: Good name for path
def remove_year(name):
    """Remove every 4-digit run (assumed to be a year) from the input.

    :param name: string to edit
    :return: input with all 4-digit runs removed
    """
    position = 0
    # scan left to right; after each removal restart from the beginning,
    # matching the original recursive behavior
    while position < len(name) - 3:
        if name[position:position + 4].isdigit():
            name = name[:position] + name[position + 4:]
            position = 0
        else:
            position += 1
    return name
Removes year from input :param name: path to edit :return: inputs with no years
def remove_brackets(name):
    """Remove brackets, and any text enclosed in () or [], from the input.

    :param name: string to fix
    :return: input with no brackets (or bracketed () / [] content)
    """
    # drop everything between a ( or [ and the first ) or ], keeping the
    # (now empty) bracket pair itself
    name = re.sub(r"([(\[]).*?([)\]])", r"\g<1>\g<2>", name)
    # then strip every bracket character in one pass
    return name.translate(str.maketrans("", "", "()[]{}"))
Removes brackets from input :param name: path to fix :return: input with no brackets
def extract_name_max_chars(name, max_chars=64, blank=" "):
    """Truncate a name to at most max_chars, cutting back to the nearest word.

    :param name: string to edit
    :param max_chars: maximum length of the result
    :param blank: word separator character
    :return: stripped name, truncated at a word boundary when too long
    """
    trimmed = name.strip()
    if len(trimmed) <= max_chars:
        return trimmed
    trimmed = trimmed[:max_chars]
    cut = trimmed.rfind(blank)
    if cut > 0:  # cut at the last full word, if any
        trimmed = trimmed[:cut]
    return trimmed
Extracts max chars in name truncated to nearest word :param name: path to edit :param max_chars: max chars of new name :param blank: char that represents the blank between words :return: Name edited to contain at most max_chars
def prettify(name, blank=" "):
    """Prettify a path name: replace bad characters with blanks.

    :param name: name to edit
    :param blank: replacement character for bad characters
    :return: prettier name with bad characters removed/collapsed
    """
    if name.startswith("."):  # remove starting dot (hidden-file marker)
        name = name[1:]
    for bad_char in BAD_CHARS:
        name = name.replace(bad_char, blank)  # remove token
    # NOTE(review): String.remove_all is a project helper; presumably it
    # collapses repeated blanks — confirm against its definition
    name = String(name).remove_all(blank)
    for i in range(1, len(name) - 2):
        try:
            # drop a bad char that sits alone between two blanks
            are_blanks = name[i - 1] == blank and name[i + 1] == blank
            if are_blanks and name[i] in BAD_CHARS:
                name = name[:i - 1] + name[i + 2:]
        except:  # out of bounds (name shrank while iterating)
            pass
    if name.startswith(blank):
        name = name[1:]
    if name.endswith(blank):  # remove ending replacement
        name = name[:-1]
    return name
Prettify name of path :param name: path Name: to edit :param blank: default blanks in name :return: Prettier name from given one: replace bad chars with good ones
def get_parent_folder_name(file_path):
    """Return the name of the folder containing file_path.

    :param file_path: path of a file
    :return: name of the containing folder
    """
    parent_dir = os.path.split(os.path.abspath(file_path))[0]
    return os.path.split(parent_dir)[-1]
Finds parent folder of file :param file_path: path :return: Name of folder container
def ls_dir(path, include_hidden=False):
    """List paths directly inside a folder.

    :param path: directory to list
    :param include_hidden: True iff hidden files should be included
    :return: de-duplicated list of paths in the directory
    """
    found = set()
    for entry in os.listdir(path):
        # keep entry unless it is hidden and hidden files are excluded
        if include_hidden or not FileSystem(entry).is_hidden():
            found.add(os.path.join(path, entry))
    return list(found)
Finds content of folder :param path: directory to get list of files and folders :param include_hidden: True iff include hidden files in list :return: List of paths in given directory
def ls_recurse(path, include_hidden=False):
    """List paths inside a folder, recursing into sub-folders.

    :param path: directory to list
    :param include_hidden: True iff hidden files should be included
    :return: de-duplicated list of paths found recursively
    """
    found = []
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if include_hidden or not FileSystem(entry).is_hidden():
            found.append(full)
        if is_folder(full):  # descend into sub-folders
            found += ls_recurse(full, include_hidden=include_hidden)
    return list(set(found))
Finds content of folder recursively :param path: directory to get list of files and folders :param include_hidden: True iff include hidden files in list :return: List of paths in given directory recursively
def list_content(path, recurse, include_hidden=False):
    """List folder content, optionally recursing into sub-folders.

    :param path: directory to list
    :param recurse: True iff sub-directories should be walked too
    :param include_hidden: True iff hidden files should be included
    :return: list of paths found
    """
    lister = ls_recurse if recurse else ls_dir
    return lister(path, include_hidden=include_hidden)
Finds content of folder (recursively) :param path: directory to get list of files and folders :param recurse: True iff recurse into subdirectories or not :param include_hidden: True iff include hidden files in list :return: List of paths in given directory recursively
def is_russian(self):
    """Check whether this file has a russian name.

    :return: True iff more than half of RUSSIAN_CHARS occur in the name
    """
    matches = sum(1 for char in RUSSIAN_CHARS if char in self.name)
    return matches > len(RUSSIAN_CHARS) / 2.0
Checks if file path is russian :return: True iff document has a russian name
def rename(self, new_path):
    """Rename this file or folder to the given path.

    :param new_path: new path to use
    """
    target = fix_raw_path(new_path)
    # folders use plain rename; files use renames (creates missing dirs)
    mover = os.rename if is_folder(self.path) else os.renames
    mover(self.path, target)
Renames to new path :param new_path: new path to use
def setClass(self, factoryclass):
    """Store the constructor for the component type this label represents.

    :param factoryclass: callable that creates an instance of the
        desired class; its ``name`` is shown as the label text
    """
    self.factoryclass = factoryclass
    label_text = str(factoryclass.name)
    self.setText(label_text)
Sets the constructor for the component type this label is to represent :param factoryclass: a class that, when called, results in an instance of the desired class :type factoryclass: callable
def mouseMoveEvent(self, event):
    """Determine whether a drag is taking place, and initiate it.

    Serializes the factory (falling back to cPickle) into the drag's
    mime data and renders the widget itself as the drag pixmap.
    """
    # ignore tiny movements — not yet a drag
    if (event.pos() - self.dragStartPosition).manhattanLength() < 10:
        return
    QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.ClosedHandCursor))
    factory = self.factoryclass()
    mimeData = QtCore.QMimeData()
    try:
        mimeData.setData("application/x-protocol", factory.serialize())
    except:  # factory has no serialize(); fall back to pickling it
        mimeData.setData("application/x-protocol", cPickle.dumps(factory))
    drag = QtGui.QDrag(self)
    drag.setMimeData(mimeData)
    # grab an image of this widget to use as the drag cursor
    pixmap = QtGui.QPixmap()
    pixmap = pixmap.grabWidget(self, self.frameRect())
    # below makes the pixmap half transparent
    # painter = QtGui.QPainter(pixmap)
    # painter.setCompositionMode(painter.CompositionMode_DestinationIn)
    # painter.fillRect(pixmap.rect(), QtGui.QColor(0, 0, 0, 127))
    # painter.end()
    drag.setPixmap(pixmap)
    drag.setHotSpot(QtCore.QPoint(pixmap.width()/2, pixmap.height()/2))
    drag.setPixmap(pixmap)
    self.dragActive.emit(True)
    result = drag.exec_(QtCore.Qt.MoveAction)
    QtGui.QApplication.restoreOverrideCursor()
Determines if a drag is taking place, and initiates it
def columnclean(column):
    """Reformat a column header so it can be imported into a database.

    :param column: raw column header
    :return: reformatted column header
    """
    # ordered substitutions; order matters (e.g. '(' before the letter pairs)
    replacements = (
        ('%', 'percent'), ('(', '_'), (')', ''),
        ('As', 'Adenosines'), ('Cs', 'Cytosines'),
        ('Gs', 'Guanines'), ('Ts', 'Thymines'),
        ('Ns', 'Unknowns'), ('index', 'adapterIndex'),
    )
    cleanedcolumn = str(column)
    for old, new in replacements:
        cleanedcolumn = cleanedcolumn.replace(old, new)
    return cleanedcolumn
Modifies column header format to be importable into a database :param column: raw column header :return: cleanedcolumn: reformatted column header
def getLabel(self, key):
    """Return the text of the label assigned to an axis.

    :param key: axis key, e.g. 'bottom' or 'left'
    :type key: str
    """
    axis_item = self.getPlotItem().axes[key]['item']
    return axis_item.label.toPlainText()
Gets the label assigned to an axes :param key:??? :type key: str
def updateData(self, axeskey, x, y):
    """Replace the currently displayed data.

    :param axeskey: which plot to update: 'stim' or 'response'
    :param x: index values associated with y
    :param y: values to plot at x
    """
    if axeskey == 'stim':
        self.stimPlot.setData(x, y)
        # trigger range handling manually to re-place the signal
        self.rangeChange(self, self.viewRange())
    if axeskey == 'response':
        self.clearTraces()
        if self._traceUnit == 'A':
            y = y * self._ampScalar
        if self.zeroAction.isChecked():
            # zero the trace against its early-sample baseline
            y = y - np.mean(y[5:25])
        self.tracePlot.setData(x, y * self._polarity)
Replaces the currently displayed data :param axeskey: name of data plot to update. Valid options are 'stim' or 'response' :type axeskey: str :param x: index values associated with y to plot :type x: numpy.ndarray :param y: values to plot at x :type y: numpy.ndarray
def appendData(self, axeskey, bins, ypoints):
    """Append raster points to the existing raster plot data.

    :param bins: bins to plot a point for (duplicates dropped)
    :param ypoints: repetition numbers; only the first value is used to
        pick the raster row
    """
    if axeskey == 'raster' and len(bins) > 0:
        xdata, ydata = self.rasterPlot.getData()
        bins = np.unique(bins)  # don't plot overlapping points
        # map repetition number onto the response y-scale slot
        row = self.rasterYslots[ypoints[0]]
        ypoints = np.ones_like(bins) * row
        self.rasterPlot.setData(np.append(xdata, bins),
                                np.append(ydata, ypoints))
Appends data to existing plotted data :param axeskey: name of data plot to update. Valid option is 'raster' :type axeskey: str :param bins: bins to plot a point for :type bins: numpy.ndarray :param ypoints: iteration number of raster, *should* match bins dimension, but really takes the first value in array for iteration number and plot row at proper place for included bins :type ypoints: numpy.ndarray
def setThreshold(self, threshold):
    """Set the current threshold on both the line and the spin field.

    :param threshold: y value to place the threshold at
    :type threshold: float
    """
    for widget in (self.threshLine, self.threshold_field):
        widget.setValue(threshold)
Sets the current threshold :param threshold: the y value to set the threshold line at :type threshold: float
def setRasterBounds(self, lims):
    """Set the raster plot's fractional y-bounds and refresh its slots.

    :param lims: (min, max) fractional y-values for the raster band
    """
    self.rasterBottom, self.rasterTop = lims[0], lims[1]
    self.updateRasterBounds()
Sets the raster plot y-axis bounds, where in the plot the raster will appear between :param lims: the (min, max) y-values for the raster plot to be placed between :type lims: (float, float)
def updateRasterBounds(self):
    """Recompute the y-coordinate slots for raster points from the
    current y-axis limits, then emit rasterBoundsUpdated."""
    ymin, ymax = self.viewRange()[1]
    span = ymax - ymin
    # fractional bounds scaled into the current view range
    top = self.rasterTop * span + ymin
    bottom = self.rasterBottom * span + ymin
    self.rasterYslots = np.linspace(bottom, top, self.nreps)
    self.rasterBoundsUpdated.emit((self.rasterBottom, self.rasterTop),
                                  self.getTitle())
Updates the y-coordinate slots where the raster points are plotted, according to the current limits of the y-axis
def askRasterBounds(self):
    """Prompt the user for raster bounds via a dialog; apply if accepted."""
    dlg = RasterBoundsDialog(bounds=(self.rasterBottom, self.rasterTop))
    if dlg.exec_():
        self.setRasterBounds(dlg.values())
Prompts the user to provide the raster bounds with a dialog. Saves the bounds to be applied to the plot
def rangeChange(self, pw, ranges):
    """Keep the stimulus signal pinned to the top of the plot after an
    axis-range change (slot for pyqtgraph's sigRangeChanged).

    :param pw: emitting plot widget (unused)
    :param ranges: only acted on when it is a nested list of axis bounds
    """
    if hasattr(ranges, '__iter__'):
        # adjust the stim signal so that it falls in the correct range
        yrange_size = ranges[1][1] - ranges[1][0]
        stim_x, stim_y = self.stimPlot.getData()
        if stim_y is not None:
            stim_height = yrange_size*STIM_HEIGHT
            # take it to 0
            stim_y = stim_y - np.amin(stim_y)
            # normalize
            if np.amax(stim_y) != 0:
                stim_y = stim_y/np.amax(stim_y)
            # scale for new size
            stim_y = stim_y*stim_height
            # raise to right place in plot
            stim_y = stim_y + (ranges[1][1] - (stim_height*1.1 + (stim_height*0.2)))
            self.stimPlot.setData(stim_x, stim_y)
        # rmax = self.rasterTop*yrange_size + ranges[1][0]
        # rmin = self.rasterBottom*yrange_size + ranges[1][0]
        self.updateRasterBounds()
Adjusts the stimulus signal to keep it at the top of a plot, after any adjustment to the axes ranges takes place. This is a slot for the undocumented pyqtgraph signal sigRangeChanged. From what I can tell the arguments are: :param pw: reference to the emitting object (plot widget in my case) :type pw: object :param ranges: I am only interested when this turns out to be a nested list of axis bounds :type ranges: object
def update_thresh(self):
    """Sync the spin field with the threshold line and emit
    thresholdUpdated with the current value."""
    value = self.threshLine.value()
    self.threshold_field.setValue(value)
    self.thresholdUpdated.emit(value, self.getTitle())
Emits a Qt signal thresholdUpdated with the current threshold value
def fromFile(self, fname):
    """Display a spectrogram of an audio file.

    :param fname: path of the audio file
    :return: duration of the recording (seconds)
    """
    spec, freqs, bins, duration = audiotools.spectrogram(fname, **self.specgramArgs)
    self.updateImage(spec, bins, freqs)
    return duration
Displays a spectrogram of an audio file. Supported formats see :func:`sparkle.tools.audiotools.audioread` :param fname: file path of the audiofile to display :type fname: str :returns: float -- duration of audio recording (seconds)
def updateImage(self, imgdata, xaxis=None, yaxis=None):
    """Update the widget image directly, rescaling when axes are given.

    :param imgdata: image array (transposed for display)
    :param xaxis: x-axis values (optional)
    :param yaxis: y-axis values (optional)
    """
    imgdata = imgdata.T
    self.img.setImage(imgdata)
    if xaxis is not None and yaxis is not None:
        # scale pixels so the image spans the provided axis extents
        xscale = 1.0/(imgdata.shape[0]/xaxis[-1])
        yscale = 1.0/(imgdata.shape[1]/yaxis[-1])
        self.resetScale()
        self.img.scale(xscale, yscale)
        self.imgScale = (xscale, yscale)
    self.imageArray = np.fliplr(imgdata)
    self.updateColormap()
Updates the Widget image directly. :type imgdata: numpy.ndarray, see :meth:`pyqtgraph:pyqtgraph.ImageItem.setImage` :param xaxis: x-axis values, length should match dimension 1 of imgdata :param yaxis: y-axis values, length should match dimension 0 of imgdata
def resetScale(self):
    """Undo any manual scaling applied to the image (back to 1:1)."""
    xs, ys = self.imgScale
    self.img.scale(1. / xs, 1. / ys)
    self.imgScale = (1., 1.)
Resets the scale on this image. Correctly aligns time scale, undoes manual scaling
def updateData(self, signal, fs):
    """Display a spectrogram of the provided signal.

    The spectrogram is computed on a worker thread so the UI doesn't lag.

    :param signal: 1-D audio signal
    :param fs: samplerate of signal
    """
    worker = threading.Thread(
        target=_doSpectrogram,
        args=(self.spec_done, (fs, signal),),
        kwargs=self.specgramArgs,
    )
    worker.start()
Displays a spectrogram of the provided signal :param signal: 1-D signal of audio :type signal: numpy.ndarray :param fs: samplerate of signal :type fs: int
def setSpecArgs(**kwargs):
    """Store optional spectrogram appearance arguments on SpecWidget.

    A 'colormap' dict (keys: lut, levels, state) is applied to every live
    instance; any other key is stored as a spectrogram argument.
    """
    for key, value in kwargs.items():
        if key == 'colormap':
            img_args = SpecWidget.imgArgs
            img_args['lut'] = value['lut']
            img_args['levels'] = value['levels']
            img_args['state'] = value['state']
            for widget in SpecWidget.instances:
                widget.updateColormap()
        else:
            SpecWidget.specgramArgs[key] = value
Sets optional arguments for the spectrogram appearance. Available options: :param nfft: size of FFT window to use :type nfft: int :param overlap: percent overlap of window :type overlap: number :param window: Type of window to use, choices are hanning, hamming, blackman, bartlett or none (rectangular) :type window: string :param colormap: Gets set by colormap editor. Holds the information to generate the colormap. Items: :meth:`lut<pyqtgraph:pyqtgraph.ImageItem.setLookupTable>`, :meth:`levels<pyqtgraph:pyqtgraph.ImageItem.setLevels>`, state (info for editor) :type colormap: dict
def clearImg(self):
    """Clear the current image (1x1 zero placeholder, image marked empty)."""
    self.img.setImage(np.array([[0]]))
    self.img.image = None
Clears the current image
def editColormap(self):
    """Open a modal pyqtgraph ImageView letting the user edit the colormap."""
    self.editor = pg.ImageView()
    # hide the ROI and Norm buttons — only the histogram/gradient is wanted
    self.editor.ui.roiBtn.setVisible(False)
    self.editor.ui.menuBtn.setVisible(False)
    self.editor.setImage(self.imageArray)
    if self.imgArgs['state'] is not None:
        hist_item = self.editor.getHistogramWidget().item
        hist_item.gradient.restoreState(self.imgArgs['state'])
        hist_item.setLevels(*self.imgArgs['levels'])
    self.editor.closeEvent = self._editor_close
    self.editor.setWindowModality(QtCore.Qt.ApplicationModal)
    self.editor.show()
Prompts the user with a dialog to change colormap
def updateColormap(self):
    """Apply the stored lookup table and levels to the image, if set."""
    lut = self.imgArgs['lut']
    if lut is not None:
        self.img.setLookupTable(lut)
        self.img.setLevels(self.imgArgs['levels'])
Updates the current colormap according to stored settings
def appendData(self, xdata, ydata, color='b', legendstr=None):
    """Add a data series to the plot.

    :param xdata: index values, plotted on the x-axis
    :param ydata: values to plot; dimension must match xdata
    :param color: pen color for the new series
    :param legendstr: optional legend entry for the series
    :return: the created plot item
    """
    item = self.plot(xdata, ydata, pen=color)
    if legendstr is not None:
        self.legend.addItem(item, legendstr)
    return item
Adds the data to the plot :param xdata: index values for data, plotted on x-axis :type xdata: numpy.ndarray :param ydata: value data to plot, dimension must match xdata :type ydata: numpy.ndarray
def setLabels(self, xlabel=None, ylabel=None, title=None, xunits=None, yunits=None):
    """Set whichever of the plot labels are provided.

    :param xlabel: x-axis label (without units)
    :param ylabel: y-axis label (without units)
    :param title: plot title
    :param xunits: SI units for the x-axis
    :param yunits: SI units for the y-axis
    """
    for axis, text, units in (('bottom', xlabel, xunits),
                              ('left', ylabel, yunits)):
        if text is not None:
            self.setLabel(axis, text, units=units)
    if title is not None:
        self.setTitle(title)
Sets the plot labels :param xlabel: X-axis label (do not include units) :type xlabel: str :param ylabel: Y-axis label (do not include units) :type ylabel: str :param title: Plot title :type title: str :param xunit: SI units for the x-axis. An appropriate label will be appended according to scale :type xunit: str :param yunit: SI units for the y-axis. An appropriate label will be appended according to scale :type yunit: str
def setPoint(self, x, group, y):
    """Plot a point, connecting it to the previous point of its group.

    :param x: x value (-1 marks the silence window, drawn as a lone marker)
    :param group: group whose line receives the point
    :param y: y value of the point
    """
    if x == -1:
        # silence window
        self.plot([0], [y], symbol='o')
        return
    line = self.lines[self.groups.index(group)]
    xdata, ydata = line.getData()
    if ydata is None:  # first point of this group
        xdata, ydata = [x], [y]
    else:
        xdata = np.append(xdata, x)
        ydata = np.append(ydata, y)
    line.setData(xdata, ydata)
Sets the given point, connects line to previous point in group :param x: x value of point :type x: float :param group: group which plot point for :type group: float :param y: y value of point :type y: float
def setLabels(self, name):
    """Apply a predefined label set.

    :param name: 'calibration', 'tuning', or anything else for spike counts
    """
    if name == "calibration":
        title = "Calibration Curve"
        self.setWindowTitle(title)
        self.setTitle(title)
        self.setLabel('bottom', "Frequency", units='Hz')
        self.setLabel('left', 'Recorded Intensity (dB SPL)')
    elif name == "tuning":
        title = "Tuning Curve"
        self.setWindowTitle(title)
        self.setTitle(title)
        self.setLabel('bottom', "Frequency", units="Hz")
        self.setLabel('left', "Spike Count (mean)")
    else:
        title = "Spike Counts"
        self.setWindowTitle(title)
        self.setTitle(title)
        self.setLabel('bottom', "Test Number", units='')
        self.setLabel('left', "Spike Count (mean)", units='')
Sets plot labels, according to predefined options :param name: The type of plot to create labels for. Options: calibration, tuning, anything else labels for spike counts :type name: str
def loadCurve(data, groups, thresholds, absvals, fs, xlabels):
    """Re-create the live progress plot from a whole test's data set,
    averaging over repetitions.

    The number of thresholds must match the channel dimension of data.

    :return: populated ProgressWidget
    """
    pw = ProgressWidget(groups, (xlabels[0], xlabels[-1]))
    spike_counts = []
    for itrace in range(data.shape[0]):
        count = 0
        for ichan in range(data.shape[2]):
            flat_reps = data[itrace, :, ichan, :].flatten()
            count += len(spikestats.spike_times(
                flat_reps, thresholds[ichan], fs, absvals[ichan]))
        # mean spikes per rep across reps and channels
        spike_counts.append(count / (data.shape[1] * data.shape[2]))
    idx = 0
    for g in groups:
        for x in xlabels:
            pw.setPoint(x, g, spike_counts[idx])
            idx += 1
    return pw
Accepts a data set from a whole test, averages reps and re-creates the progress plot as the same as it was during live plotting. Number of thresholds must match the size of the channel dimension
def setBins(self, bins):
    """Set the histogram bin centers and reset all counts to zero.

    :param bins: time bin centers
    """
    self._bins = bins
    self._counts = np.zeros_like(bins)
    self.histo.setOpts(x=bins, height=self._counts, width=bins[0] * 1.5)
    self.setXlim((0, bins[-1]))
Sets the bin centers (x values) :param bins: time bin centers :type bins: numpy.ndarray
def clearData(self):
    """Zero all histogram counts; the bin centers are kept."""
    self._counts = np.zeros_like(self._bins)
    self.histo.setOpts(height=self._counts)
Clears all histograms (keeps bins)
def appendData(self, bins, repnum=None):
    """Increment the histogram counts at the given bin indexes.

    Indexes beyond the last bin are clamped to the last bin. To increment
    a bin more than once, include its index multiple times.

    :param bins: integer bin indexes to increment
    :param repnum: unused, kept for caller compatibility
    """
    # clamp out-of-range indexes WITHOUT mutating the caller's array
    # (the previous in-place boolean assignment modified `bins` for the caller)
    bins = np.minimum(bins, len(self._counts) - 1)
    bin_totals = np.bincount(bins)
    self._counts[:len(bin_totals)] += bin_totals
    self.histo.setOpts(height=np.array(self._counts))
Increases the values at bins (indexes) :param bins: bin center values to increment counts for, to increment a time bin more than once include multiple items in list with that bin center value :type bins: numpy.ndarray
def processData(self, times, response, test_num, trace_num, rep_num):
    """Calculate spike times from raw response data and accumulate
    per-rep counts, first-spike latencies and firing rates.

    State is reset when rep_num == 0 (start of a new trace).
    """
    # invert polarity affects spike counting
    response = response * self._polarity
    if rep_num == 0:
        # reset
        self.spike_counts = []
        self.spike_latencies = []
        self.spike_rates = []
    # samplerate recovered from the time vector spacing
    fs = 1./(times[1] - times[0])
    # process response; calculate spike times
    spike_times = spikestats.spike_times(response, self._threshold, fs)
    self.spike_counts.append(len(spike_times))
    if len(spike_times) > 0:
        self.spike_latencies.append(spike_times[0])
    else:
        self.spike_latencies.append(np.nan)  # no spike this rep
    self.spike_rates.append(spikestats.firing_rate(spike_times, times))
    binsz = self._bins[1] - self._bins[0]
    response_bins = spikestats.bin_spikes(spike_times, binsz)
    # self.putnotify('spikes_found', (response_bins, rep_num))
    self.appendData(response_bins, rep_num)
Calculate spike times from raw response data
def setSr(self, fs):
    """Propagate the input samplerate to both scroll plots.

    :param fs: samplerate of the plotted operation
    """
    for plot in (self.tracePlot, self.stimPlot):
        plot.setSr(fs)
Sets the samplerate of the input operation being plotted
def setWindowSize(self, winsz):
    """Propagate the scroll-window size to both scroll plots.

    :param winsz: size of the scroll window
    """
    for plot in (self.tracePlot, self.stimPlot):
        plot.setWindowSize(winsz)
Sets the size of scroll window
def addPlot(self, xdata, ydata, xlabel=None, ylabel=None, title=None,
            xunits=None, yunits=None):
    """Create a SimplePlotWidget for the data/labels and add it to the stack.

    :param xdata: index values, plotted on the x-axis
    :param ydata: values to plot; dimension must match xdata
    """
    widget = SimplePlotWidget(xdata, ydata)
    widget.setLabels(xlabel, ylabel, title, xunits, yunits)
    self.stacker.addWidget(widget)
Adds a new plot for the given set of data and/or labels, Generates a SimplePlotWidget :param xdata: index values for data, plotted on x-axis :type xdata: numpy.ndarray :param ydata: value data to plot, dimension must match xdata :type ydata: numpy.ndarray
def addSpectrogram(self, ydata, fs, title=None):
    """Create a SpecWidget for the image and add it to the stack.

    :param ydata: 2-D image array to display
    :param fs: samplerate of the signal (sets time/frequency scale)
    :param title: optional plot title
    """
    widget = SpecWidget()
    widget.updateData(ydata, fs)
    if title is not None:
        widget.setTitle(title)
    self.stacker.addWidget(widget)
Adds a new spectrogram plot for the given image. Generates a SpecWidget :param ydata: 2-D array of the image to display :type ydata: numpy.ndarray :param fs: the samplerate of the signal in the image, used to set time/ frequency scale :type fs: int :param title: Plot title :type title: str
def nextPlot(self):
    """Advance the stacked display to the next plot, if there is one."""
    current = self.stacker.currentIndex()
    # last valid index is count()-1; the old check compared against
    # count(), requesting an out-of-range index from the last plot
    if current < self.stacker.count() - 1:
        self.stacker.setCurrentIndex(current + 1)
Moves the displayed plot to the next one
def prevPlot(self):
    """Step the stacked display back to the previous plot, if any."""
    current = self.stacker.currentIndex()
    if current > 0:
        self.stacker.setCurrentIndex(current - 1)
Moves the displayed plot to the previous one
def most_even_chunk(string, group):
    """Divide a string into `group` chunks as evenly as possible.

    :param string: string to split
    :param group: number of chunks
    :return: list of substrings
    """
    boundaries = accumulate([0] + most_even(len(string), group))
    return [string[start:stop] for start, stop in window(boundaries, 2)]
Divide a string into a list of strings as even as possible.
def most_even(number, group):
    """Divide a number into `group` integers as evenly as possible.

    :param number: total to divide
    :param group: number of parts
    :return: list of part sizes, larger parts first
    """
    quotient, remainder = divmod(number, group)
    # pad the first `remainder` parts with an extra 1 each
    padded = zip_longest([quotient] * group, [1] * remainder, fillvalue=0)
    chunks = [base + extra for base, extra in padded]
    logging.debug('chunks: %s', chunks)
    return chunks
Divide a number into a list of numbers as even as possible.
def window(seq, count=2):
    """Slide a window of `count` items over seq, yielding tuples.

    Yields nothing when seq is shorter than count.
    """
    iterator = iter(seq)
    current = tuple(islice(iterator, count))
    if len(current) == count:
        yield current
    for item in iterator:
        current = current[1:] + (item,)
        yield current
Slide window.
def _get_modules(path):
    """Recursively collect file paths inside a Python package folder.

    Folders without an __init__.py are not treated as packages and
    contribute nothing.

    :param path: directory to scan
    :return: de-duplicated list of file paths
    """
    found = []
    contents = os.listdir(path)
    if "__init__.py" in contents:  # only real packages are scanned
        for entry in contents:
            full = os.path.join(path, entry)
            if is_file(full):
                found.append(full)
            if is_folder(full):
                found += _get_modules(full)  # recurse in folder
    return list(set(found))
Finds modules in folder recursively :param path: directory :return: list of modules
def get_modules(folder, include_meta=False):
    """Find module files (recursively) in a folder.

    :param folder: root folder
    :param include_meta: whether to include meta files (__init__, __version__)
    :return: list of module file paths
    """
    files = [f for f in _get_modules(folder) if is_file(f)]
    if include_meta:
        return files
    return [f for f in files if not Document(f).name.startswith("__")]
Finds modules (recursively) in folder :param folder: root folder :param include_meta: whether include meta files like (__init__ or __version__) :return: list of modules
def _parse(self):
    """Parse this file's contents into an AST.

    :return: tree hierarchy of the file
    """
    with open(self.path, "rt") as source:
        return ast.parse(source.read(), filename=self.path)
Parses file contents :return: Tree hierarchy of file
def _find_package(self, root_package):
    """Derive the dotted package name of this file relative to a root.

    :param root_package: path of the root package
    :return: dotted package name
    """
    package = self.path.replace(root_package, "")
    if package.endswith(".py"):
        package = package[:-len(".py")]
    package = package.replace(os.path.sep, MODULE_SEP)
    # prepend the root package's own name
    return get_folder_name(root_package) + package
Finds package name of file :param root_package: root package :return: package name
def _get_instances(self, instance):
    """Collect top-level AST nodes of the given type.

    :param instance: AST node type to match
    :return: matching nodes from the tree body
    """
    return [node for node in self.tree.body if isinstance(node, instance)]
Finds all instances of instance in tree :param instance: type of object :return: list of objects in tree of same instance
def get_classes(self):
    """Find top-level classes in this file.

    :return: list of PyClass wrappers for each top-level class
    """
    return [
        PyClass(node, self.package)
        for node in self._get_instances(ast.ClassDef)
    ]
Finds classes in file :return: list of top-level classes
def get_functions(self):
    """Find top-level functions in this file.

    :return: list of PyFunction wrappers for each top-level function
    """
    return [
        PyFunction(node, self.package)
        for node in self._get_instances(ast.FunctionDef)
    ]
Finds top-level functions in file :return: list of top-level functions
def get_functions(self, include_meta=False):
    """Find top-level functions in this file.

    :param include_meta: whether to include meta functions (e.g. __init__)
    :return: list of PyFunction wrappers
    """
    functions = [
        PyFunction(node, self.full_package)
        for node in self._get_instances(ast.FunctionDef)
    ]
    if include_meta:
        return functions
    return [f for f in functions if not f.get_name().startswith("__")]
Finds top-level functions in file :param include_meta: whether include meta functions like (__init__) :return: list of top-level functions
def reads(err_log):
    """Parse bbmerge's stderr log for read counts.

    :param err_log: path to the log file bbmerge wrote its stats to
    :return: (num_reads, paired_reads) — total reads and paired reads
        as stripped strings (0 when the lines are absent)
    """
    num_reads, paired_reads = 0, 0
    with open(err_log, 'r') as handle:
        for line in handle:
            fields = line.split('\t')
            if 'Pairs:' in line:
                num_reads = fields[-1].rstrip()
            elif 'Joined:' in line:
                paired_reads = fields[-2].rstrip()
    return num_reads, paired_reads
Parse the outputs from bbmerge to extract the total number of reads, as well as the number of reads that could be paired :param err_log: bbmerge outputs the stats in the error file :return: num_reads, the total number of reads, paired_reads, number of paired reads
def best_assemblyfile(self):
    """Determine whether each sample's assembler output file is present
    and non-empty; set .bestassemblyfile to the file or 'NA' accordingly,
    copying usable assemblies to the raw_assemblies folder.
    """
    for sample in self.metadata:
        try:
            # Set the name of the filtered assembly file
            filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name))
            # Set the name of the unfiltered spades assembly output file
            if os.path.isfile(sample.general.assemblyfile):
                size = os.path.getsize(sample.general.assemblyfile)
                # Ensure that the assembly isn't just an empty file
                if size == 0:
                    sample.general.bestassemblyfile = 'NA'
                else:
                    sample.general.bestassemblyfile = sample.general.assemblyfile
                    shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile)
            else:
                sample.general.bestassemblyfile = 'NA'
            # Add the name and path of the filtered file to the metadata
            sample.general.filteredfile = filtered_outputfile
        except AttributeError:
            # sample lacks the expected metadata attributes entirely
            sample.general.assemblyfile = 'NA'
            sample.general.bestassemblyfile = 'NA'
Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassembly attribute to 'NA'
def groups(self):
    """Lazily-created ComponentGroups helper bound to this API client.

    Initialized on first access, then cached.
    """
    cached = self._groups
    if not cached:
        cached = self._groups = ComponentGroups(self.api_client)
    return cached
Component groups Special property which point to a :class:`~pylls.cachet.ComponentGroups` instance for convenience. This instance is initialized on first call.
def get(self, component_id=None, **kwargs):
    """Get components — all of them, or one by ID.

    Extra named arguments are passed straight to the API (search, etc.).

    :param component_id: component ID (optional)
    :return: components data (generator)
    """
    path = 'components'
    if component_id is not None:
        path = '%s/%s' % (path, component_id)
    return self.paginate_get(path, data=kwargs)
Get components :param component_id: Component ID (optional) :return: Components data (:class:`Generator`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-components .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
def create(self, name, status, description="", link="", order=0,
           group_id=0, enabled=True):
    """Create a new component.

    :param str name: name of the component
    :param int status: status of the component; 1-4
    :param str description: description (optional)
    :param str link: hyperlink to the component (optional)
    :param int order: order of the component (optional)
    :param int group_id: group ID of the component (optional)
    :param bool enabled: whether the component is enabled (optional)
    :return: created component data (dict)
    """
    params = ApiParams()
    for field, value in (('name', name), ('status', status),
                         ('description', description), ('link', link),
                         ('order', order), ('group_id', group_id),
                         ('enabled', enabled)):
        params[field] = value
    return self._post('components', data=params)['data']
Create a new component :param str name: Name of the component :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Created component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses
def update(self, component_id, name=None, status=None, description=None,
           link=None, order=None, group_id=None, enabled=True):
    """Update a component.

    :param int component_id: component ID
    :param str name: name of the component (optional)
    :param int status: status of the component; 1-4 (optional)
    :param str description: description (optional)
    :param str link: hyperlink to the component (optional)
    :param int order: order of the component (optional)
    :param int group_id: group ID of the component (optional)
    :param bool enabled: whether the component is enabled
    :return: updated component data (dict)
    """
    params = ApiParams()
    for field, value in (('component', component_id), ('name', name),
                         ('status', status), ('description', description),
                         ('link', link), ('order', order),
                         ('group_id', group_id), ('enabled', enabled)):
        params[field] = value
    return self._put('components/%s' % component_id, data=params)['data']
Update a component :param int component_id: Component ID :param str name: Name of the component (optional) :param int status: Status of the component; 1-4 :param str description: Description of the component (optional) :param str link: A hyperlink to the component (optional) :param int order: Order of the component (optional) :param int group_id: The group ID of the component (optional) :param bool enabled: Whether the component is enabled (optional) :return: Updated component data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#components .. seealso:: https://docs.cachethq.io/docs/component-statuses
def get(self, group_id=None, **kwargs):
    """Get component groups — all of them, or one by ID.

    Extra named arguments are passed straight to the API (search, etc.).

    :param group_id: component group ID (optional)
    :return: component groups data
    """
    path = 'components/groups'
    if group_id is not None:
        path = '%s/%s' % (path, group_id)
    return self.paginate_get(path, data=kwargs)
Get component groups :param group_id: Component group ID (optional) :return: Component groups data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-componentgroups .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
def create(self, name, order=None, collapsed=None):
    """Create a new component group.

    :param str name: name of the component group
    :param int order: order of the component group (optional)
    :param int collapsed: collapse the group? 0-2 (optional)
    :return: created component group data (dict)
    """
    params = ApiParams()
    for field, value in (('name', name), ('order', order),
                         ('collapsed', collapsed)):
        params[field] = value
    return self._post('components/groups', data=params)['data']
Create a new Component Group :param str name: Name of the component group :param int order: Order of the component group :param int collapsed: Collapse the group? 0-2 :return: Created component group data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#post-componentgroups
def update(self, group_id, name=None, order=None, collapsed=None):
    """Update a component group.

    :param int group_id: component group ID
    :param str name: name of the component group (optional)
    :param int order: order of the group (optional)
    :param int collapsed: collapse the group? (optional)
    :return: updated component group data (dict)
    """
    params = ApiParams()
    for field, value in (('group', group_id), ('name', name),
                         ('order', order), ('collapsed', collapsed)):
        params[field] = value
    return self._put('components/groups/%s' % group_id, data=params)['data']
Update a Component Group :param int group_id: Component Group ID :param str name: Name of the component group :param int order: Order of the group :param int collapsed: Collapse the group? :return: Updated component group data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#put-component-group
def get(self, incident_id=None, **kwargs):
    """Get incidents — all of them, or one by ID.

    Extra named arguments are passed straight to the API (search, etc.).

    :param int incident_id: incident ID (optional)
    :return: incidents data
    """
    path = 'incidents'
    if incident_id is not None:
        path = '%s/%s' % (path, incident_id)
    return self.paginate_get(path, data=kwargs)
Get incidents :param int incident_id: :return: Incidents data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-incidents .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
def create(self, name, message, status, visible, component_id=None,
           component_status=None, notify=None, created_at=None,
           template=None, tplvars=None):
    """Create a new incident.

    :param str name: name of the incident
    :param str message: incident explanation message
    :param int status: status of the incident
    :param int visible: whether the incident is publicly visible
    :param int component_id: component to update (optional)
    :param int component_status: status to set on that component (optional)
    :param bool notify: whether to notify subscribers (optional)
    :param str created_at: when the incident was created (optional)
    :param str template: template slug to use (optional)
    :param list tplvars: variables to pass to the template (optional)
    :return: created incident data (dict)
    """
    params = ApiParams()
    for field, value in (('name', name), ('message', message),
                         ('status', status), ('visible', visible),
                         ('component_id', component_id),
                         ('component_status', component_status),
                         ('notify', notify), ('created_at', created_at),
                         ('template', template), ('vars', tplvars)):
        params[field] = value
    return self._post('incidents', data=params)['data']
Create a new Incident :param str name: Name of the incident :param str message: Incident explanation message :param int status: Status of the incident :param int visible: Whether the incident is publicly visible :param int component_id: Component to update :param int component_status: The status to update the given component :param bool notify: Whether to notify subscribers :param str created_at: When the incident was created :param str template: The template slug to use :param list tplvars: The variables to pass to the template :return: Created incident data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#incidents
def update(self, incident_id, name=None, message=None, status=None,
           visible=None, component_id=None, component_status=None,
           notify=None, created_at=None, template=None, tpl_vars=None):
    """Update an Incident.

    :param int incident_id: Incident ID
    :param str name: Name of the incident
    :param str message: Incident explanation message
    :param int status: Status of the incident
    :param int visible: Whether the incident is publicly visible
    :param int component_id: Component to update
    :param int component_status: The status to update the given component
    :param bool notify: Whether to notify subscribers
    :param str created_at: When the incident was created
    :param str template: The template slug to use
    :param list tpl_vars: The variables to pass to the template
    :return: Updated incident data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#update-an-incident
    """
    params = ApiParams()
    # Note the API expects the template variables under the 'vars' key
    for field, value in (('name', name), ('message', message),
                         ('status', status), ('visible', visible),
                         ('component_id', component_id),
                         ('component_status', component_status),
                         ('notify', notify), ('created_at', created_at),
                         ('template', template), ('vars', tpl_vars)):
        params[field] = value
    return self._put('incidents/%s' % incident_id, data=params)['data']
Update an Incident :param int incident_id: Incident ID :param str name: Name of the incident :param str message: Incident explanation message :param int status: Status of the incident :param int visible: Whether the incident is publicly visible :param int component_id: Component to update :param int component_status: The status to update the given component :param bool notify: Whether to notify subscribers :param str created_at: When the incident was created :param str template: The template slug to use :param list tpl_vars: The variables to pass to the template :return: Updated incident data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#update-an-incident
def points(self):
    """Metric points.

    Convenience accessor that lazily creates and caches a
    :class:`~pylls.cachet.MetricPoints` bound to the same API client on
    first use.
    """
    # Create the helper only once; subsequent calls reuse the cached one
    self._points = self._points or MetricPoints(self.api_client)
    return self._points
Metric points Special property which point to a :class:`~pylls.cachet.MetricPoints` instance for convenience. This instance is initialized on first call.
def get(self, metric_id=None, **kwargs):
    """Get metrics.

    :param int metric_id: optional Metric ID; when given, fetch only that
        metric
    :return: Metrics data (:class:`dict`)

    Additional named arguments are transmitted directly to the API and
    are useful for its search features.

    .. seealso:: https://docs.cachethq.io/reference#get-metrics
    .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
    """
    path = 'metrics' if metric_id is None else 'metrics/%s' % metric_id
    return self.paginate_get(path, data=kwargs)
Get metrics :param int metric_id: Metric ID :return: Metrics data (:class:`dict`) Additional named arguments may be passed and are directly transmitted to API. It is useful to use the API search features. .. seealso:: https://docs.cachethq.io/reference#get-metrics .. seealso:: https://docs.cachethq.io/docs/advanced-api-usage
def create(self, name, suffix, description, default_value, display=None):
    """Create a new Metric.

    :param str name: Name of metric
    :param str suffix: Metric unit
    :param str description: Description of what the metric is measuring
    :param int default_value: Default value to use when a point is added
    :param int display: Display the chart on the status page
    :return: Created metric data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#metrics
    """
    params = ApiParams()
    for field, value in (('name', name), ('suffix', suffix),
                         ('description', description),
                         ('default_value', default_value),
                         ('display', display)):
        params[field] = value
    return self._post('metrics', data=params)['data']
Create a new Metric :param str name: Name of metric :param str suffix: Metric unit :param str description: Description of what the metric is measuring :param int default_value: Default value to use when a point is added :param int display: Display the chart on the status page :return: Created metric data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#metrics
def create(self, metric_id, value, timestamp=None):
    """Add a Metric Point to a Metric.

    :param int metric_id: Metric ID
    :param int value: Value to plot on the metric graph
    :param str timestamp: Unix timestamp of when the point was measured
    :return: Created metric point data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#post-metric-points
    """
    params = ApiParams()
    for field, field_value in (('value', value), ('timestamp', timestamp)):
        params[field] = field_value
    return self._post('metrics/%s/points' % metric_id, data=params)['data']
Add a Metric Point to a Metric :param int metric_id: Metric ID :param int value: Value to plot on the metric graph :param str timestamp: Unix timestamp of when the point was measured :return: Created metric point data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#post-metric-points
def create(self, email, verify=None, components=None):
    """Create a new subscriber.

    :param str email: Email address to subscribe
    :param bool verify: Whether to send a verification email
    :param list components: Component ID list, defaults to all
    :return: Created subscriber data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#subscribers
    """
    params = ApiParams()
    for field, value in (('email', email), ('verify', verify),
                         ('components', components)):
        params[field] = value
    return self._post('subscribers', data=params)['data']
Create a new subscriber :param str email: Email address to subscribe :param bool verify: Whether to send verification email :param list components: Components ID list, defaults to all :return: Created subscriber data (:class:`dict`) .. seealso:: https://docs.cachethq.io/reference#subscribers
def annotatedcore(self): logging.info('Calculating annotated core') # Determine the total number of core genes self.total_core() # Iterate through all the samples, and process all Escherichia for sample in self.metadata: if sample.general.bestassemblyfile != 'NA': # Create a set to store the names of all the core genes in this strain sample[self.analysistype].coreset = set() if sample.general.referencegenus == 'Escherichia': # Add the Escherichia sample to the runmetadata self.runmetadata.samples.append(sample) # Parse the BLAST report try: report = sample[self.analysistype].report self.blastparser(report=report, sample=sample, fieldnames=self.fieldnames) except KeyError: sample[self.analysistype].coreset = list() # Create the report self.reporter()
Calculates the core genome of organisms using custom databases
def total_core(self):
    """Determine the total number of core genes present.

    Reads the combined Escherichia core FASTA file and records each
    unique gene name (the record ID up to the first '-') in
    self.coregenomes.
    """
    corefile = os.path.join(self.reffilepath, self.analysistype,
                            'Escherichia', 'core_combined.fasta')
    seen = self.coregenomes
    for record in SeqIO.parse(corefile, 'fasta'):
        gene = record.id.partition('-')[0]
        if gene not in seen:
            seen.append(gene)
Determine the total number of core genes present
def blastparser(self, report, sample, fieldnames):
    """
    Parse the number of core genes present in the strain from the BLAST
    outputs, adding every target that passes the identity cutoff to
    sample[self.analysistype].coreset.

    :param report: the name and path of the BLAST outputs
    :param sample: the sample object
    :param fieldnames: type LIST: List of fields used in BLAST analyses
    """
    try:
        # Use a context manager so the report file is closed (it was
        # previously opened and leaked), and honour the fieldnames
        # parameter (previously self.fieldnames was used, silently
        # ignoring the argument -- callers pass self.fieldnames anyway)
        with open(report) as report_file:
            blastdict = DictReader(report_file, fieldnames=fieldnames,
                                   dialect='excel-tab')
            # Go through each BLAST result
            for row in blastdict:
                # Ignore the header row, which repeats the first field name
                if row['query_id'].startswith(fieldnames[0]):
                    continue
                # Percent identity as computed here:
                # (positives - gaps) / subject length * 100
                percentidentity = float('{:0.2f}'.format(
                    (float(row['positives']) - float(row['gaps'])) /
                    float(row['subject_length']) * 100))
                # Split off any | and - from the subject name
                target = row['subject_id'].split('|')[0].split('-')[0]
                # If the hit passes the cutoff threshold, add it to the set
                # of core genes present
                if percentidentity >= self.cutoff:
                    sample[self.analysistype].coreset.add(target)
    except FileNotFoundError:
        # Missing report: leave the core set untouched (best-effort)
        pass
Parse the number of core genes present in the strain from the BLAST outputs :param report: the name and path of the BLAST outputs :param sample: the sample object :param fieldnames: type LIST: List of fields used in BLAST analyses
def reporter(self):
    """
    Create a .csv file with the strain name and the number of core genes
    present out of the total number of core genes, then strip bulky
    intermediate attributes from the sample objects.
    """
    with open(os.path.join(self.reportpath, 'Escherichia_core.csv'), 'w') as report:
        data = 'Strain,Genes Present/Total\n'
        for sample in self.runmetadata.samples:
            # Convert the set to a list for JSON serialization
            sample[self.analysistype].coreset = list(sample[self.analysistype].coreset)
            sample[self.analysistype].coreresults = '{cs}/{cg}'.format(cs=len(sample[self.analysistype].coreset),
                                                                       cg=len(self.coregenomes))
            # Add strain name, the number of core genes present, and the number of total core genes to the string
            data += '{sn},{cr}\n'.format(sn=sample.name,
                                         cr=sample[self.analysistype].coreresults)
        report.write(data)
    # Clean up all samples (not just the Escherichia ones reported above)
    for sample in self.metadata:
        # Remove the messy blast results and set/list of core genes from the object
        try:
            delattr(sample[self.analysistype], "blastresults")
        except AttributeError:
            pass
        try:
            delattr(sample[self.analysistype], 'coreset')
        except AttributeError:
            pass
Create a .csv file with the strain name, and the number of core genes present/the total number of core genes
def get_simple_output(self, stderr=STDOUT):
    """Run the (pipe-free) command and return its decoded output.

    Error messages are merged into standard output by default.

    :param stderr: destination for the standard error stream
    :return: the command's stdout as a UTF-8 string
    """
    process = Popen(shlex.split(self.cmd), stdout=PIPE, stderr=stderr)
    stdout, _ = process.communicate()
    return stdout.decode("utf8")
Executes a simple external command and get its output The command contains no pipes. Error messages are redirected to the standard output by default :param stderr: where to put stderr :return: output of command
def get_complex_output(self, stderr=STDOUT):
    """Run the (possibly piped) command through a shell.

    :param stderr: destination for the standard error stream
    :return: list of the command's output lines (as bytes)
    """
    process = Popen(self.cmd, shell=True, stdout=PIPE, stderr=stderr)
    return process.stdout.readlines()
Executes a piped command and get the lines of the output in a list :param stderr: where to put stderr :return: output of command
def get_output_from_pipe(self, input_file):
    """Run the command, feeding *input_file* to it on standard input.

    :param input_file: text to send to the command's stdin
    :return: the command's stdout as a UTF-8 string
    """
    process = Popen(shlex.split(self.cmd), stdout=PIPE, stdin=PIPE)
    # communicate() sends the input, closes stdin and collects stdout
    stdout, _ = process.communicate(input=input_file.encode("utf8"))
    return stdout.decode("utf8")
Executes an external command and get its output. The command receives its input_file from the stdin through a pipe :param input_file: input file :return: output of command
def get_return_code(self, stderr=STDOUT):
    """Run the simple command and report its exit status.

    :param stderr: destination for the standard error stream
    :return: the command's return code
    """
    return call(shlex.split(self.cmd), stdout=PIPE, stderr=stderr)
Executes a simple external command and return its exit status :param stderr: where to put stderr :return: return code of command
def get_exit_code(self):
    """Execute the external command and collect its results.

    :return: tuple of (exit code, decoded stdout, decoded stderr)
    """
    args = shlex.split(self.cmd)
    proc = Popen(args, stdout=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    out, err = out.decode("utf8"), err.decode("utf8")
    exitcode = proc.returncode
    # Bug fix: the return statement was commented out, so the method
    # always returned None despite computing all three values
    return exitcode, out, err
Executes the external command and get its exitcode, stdout and stderr :return: exit code of command
def execute_in_background(self):
    """Launch the command as a detached child process.

    See http://stackoverflow.com/questions/1605520 for background.

    :return: the spawned process' pid
    """
    child = Popen(shlex.split(self.cmd))
    return child.pid
Executes a (shell) command in the background :return: the process' pid
def keep_alive(self):
    """Keep the wrapped command running forever, restarting it whenever
    the spawned process stops running or becomes a zombie.

    Terminated children become zombies until their parent exits, hence
    the explicit zombie check.

    .. note:: this method never returns.
    """
    while True:
        pid = self.execute_in_background()
        p = psutil.Process(pid)
        # Poll every 5 seconds; respawn once the process dies or zombifies.
        # NOTE(review): on psutil >= 2.0, Process.status is a *method*, so
        # str(p.status) is its repr and never equals 'zombie' -- confirm
        # the psutil version this targets
        while p.is_running() and str(p.status) != 'zombie':
            os.system('sleep 5')
Keeps a process alive. If the process terminates, it will restart it The terminated processes become zombies. They die when their parent terminates
def get_free_mb(folder):
    """Return the folder/drive free space, in megabytes.

    Uses the Windows API on Windows and os.statvfs elsewhere.

    :param folder: path on the drive to query
    :return: free space in MB
    """
    if platform.system() == 'Windows':
        available = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder),
                                                   None, None,
                                                   ctypes.pointer(available))
        return available.value / 1024 / 1024
    stats = os.statvfs(folder)
    return stats.f_bavail * stats.f_frsize / 1024 / 1024
Return folder/drive free space (in MB)
def increment_title(title):
    """Increment the trailing number of *title* by one.

    Leading zeros in the trailing number are not preserved
    (e.g. "v09" becomes "v10").

    :param title: string that ends in a decimal number
    :return: the same string with the trailing number incremented
    :raises AttributeError: if *title* does not end in a digit
    """
    # Raw string avoids the invalid escape sequence '\d' warning
    count = re.search(r'\d+$', title).group(0)
    return title[:len(title) - len(count)] + str(int(count) + 1)
Increments a string that ends in a number
def check_limit(self, limit):
    """Validate and store the rule limit.

    A limit must be strictly greater than zero to be considered valid.

    :param limit: maximum count for the rule
    :return: self, to allow call chaining
    :raises ValueError: when *limit* is not > 0
    """
    if limit <= 0:
        raise ValueError("Rule limit must be strictly > 0 ({0} given)"
                         .format(limit))
    self.limit = limit
    return self
Checks if the given limit is valid. A limit must be > 0 to be considered valid. Raises ValueError when the *limit* is not > 0.
def build_filter(self, filter):
    """Build the rule's :class:`filter.Filter` from its string form.

    :param filter: textual filter definition
    :return: self, to allow call chaining
    :raises ValueError: when a Filter can't be built from *filter*
    """
    # Filter.from_string already raises ValueError on bad input; the
    # previous try/except that immediately re-raised added nothing.
    self.filter = Filter.from_string(filter, self.limit)
    return self
Tries to build a :class:`filter.Filter` instance from the given filter. Raises ValueError if the :class:`filter.Filter` object can't be built from the given filter.
def build_action(self, action):
    """Build the rule's :class:`action.Action` from its string form.

    :param action: textual action definition
    :return: self, to allow call chaining
    :raises ValueError: when an Action can't be built from *action*
    """
    # Action.from_string already raises ValueError on bad input; the
    # previous try/except that immediately re-raised added nothing.
    self.action = Action.from_string(action)
    return self
Tries to build an :class:`action.Action` instance from the given action. Raises ValueError if the :class:`action.Action` object can't be built from the given action.
def get_args():
    """Build and parse the command-line arguments for the frame script.

    :return: parsed argument namespace with start, end, step and config
    """
    parser = argparse.ArgumentParser(
        description="Create frames for a movie that can be compiled using ffmpeg")
    parser.add_argument("start", help="date string as start time")
    parser.add_argument("end", help="date string as end time")
    parser.add_argument("step", type=float, help="fraction of a day to step by")
    parser.add_argument("--config", help="path to a config file",
                        default="config.json")
    return parser.parse_args()
request the arguments for running
def make_three_color(data, time, step, config, shape=(1280, 1280),
                     lower_val=(0, 0, 0), upper_val=(2.5, 2.5, 2.5)):
    """Create a three color image according to the config file.

    :param data: a dictionary of fetched data where keys correspond to products
    :param time: nominal time of the composite frame
    :param step: timedelta between frames; a channel observed more than
        step/2 away from *time* invalidates the whole frame
    :param config: a config object
    :param shape: the size of a composite image
    :param lower_val: a tuple of lower values for RGB, any value below this is
        set to the low value
    :param upper_val: a tuple of upper values for RGB, any value above this is
        set to the high value
    :return: a (m,n,3) numpy array for a three color image where all values
        are between 0 and 1
    """
    order = {'red': 0, 'green': 1, 'blue': 2}
    three_color = np.zeros((shape[0], shape[1], 3))
    channel_colors = {color: config.default[color]
                      for color in ['red', 'green', 'blue']}
    for color, channel in channel_colors.items():
        # Missing data, or data observed too far from the frame time,
        # invalidates the frame: return an all-black image
        if data[channel][1] is None or \
                abs((time - date_parser.parse(data[channel][0]['date-end'])).total_seconds()) \
                > step.total_seconds() / 2.0:
            return np.zeros((shape[0], shape[1], 3))
        index = order[color]
        three_color[:, :, index] = data[channel][1]
        # scale the image by the power
        three_color[:, :, index] = np.power(
            three_color[:, :, index],
            config.default["{}_power".format(color)])
        # Clip to the thresholds on a *view* of this channel only.
        # Bug fix: the previous code indexed the 3-D array with a 2-D
        # np.where mask, which set all three channels of the selected
        # pixels instead of just the current one.
        channel_plane = three_color[:, :, index]
        channel_plane[channel_plane < lower_val[index]] = lower_val[index]
        channel_plane[channel_plane > upper_val[index]] = upper_val[index]
    # image values must be between (0,1) so scale image
    for color, index in order.items():
        three_color[:, :, index] /= upper_val[index]
    return three_color
create a three color image according to the config file :param data: a dictionary of fetched data where keys correspond to products :param config: a config object :param shape: the size of a composite image :param lower_val: a tuple of lower values for RGB, any value below this is set to the low value :param upper_val: a tuple of upper values for RGB, any value above this is set to the high value :return: a (m,n,3) numpy array for a three color image where all values are between 0 and 1
def main():
    """Process the main task: render one plot per time step between the
    requested start and end times."""
    args = get_args()
    args.start = date_parser.parse(args.start)
    args.end = date_parser.parse(args.end)
    args.step = timedelta(args.step)
    config = Config(args.config)
    frame_count = int((args.end - args.start) / args.step)
    frame_times = [args.start + i * args.step for i in range(frame_count)]
    for frame_time in frame_times:
        make_plot(frame_time, config, args.step)
process the main task
def overall():
    """The overall grammar for pulling apart the main input files."""
    # A section followed by any number of its (grouped) lines
    section = Group(Grammar._section + ZeroOrMore(Group(Grammar.line)))
    # Leading comments, then the sections collected into a Dict
    return ZeroOrMore(Grammar.comment) + Dict(ZeroOrMore(section))
The overall grammer for pulling apart the main input files.
def file():
    """Grammar for files found in the overall input files."""
    # Optional "alias." prefix; the dot itself is suppressed
    alias = Word(alphanums).setResultsName('alias') + Suppress(Literal('.'))
    filename = Word(approved_printables).setResultsName('filename')
    return Optional(alias) + Suppress(White()) + filename
Grammar for files found in the overall input files.
def command_lines():
    """Grammar for commands found in the overall input files."""
    # Each command is any number of leading comments plus the command line
    leading_comments = Group(ZeroOrMore(Group(Grammar.comment)))
    return ZeroOrMore(Group(leading_comments + Grammar._non_comment_line))
Grammar for commands found in the overall input files.

No dataset card yet

Downloads last month
75