code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def get_resources(minify=False):
    """Find all resources which subclass ResourceBase.

    Keyword arguments:
    minify -- select minified resources if available.

    Returns:
    Dictionary of available resources. Keys are resource names (part of the
    config variable names), values are dicts with css and js keys, and tuples
    of resources as values.
    """
    all_resources = dict()
    # NOTE(review): duplicates between ResourceBase and ResourceAngular
    # subclasses would overwrite each other by RESOURCE_NAME -- confirm
    # the two hierarchies are disjoint.
    subclasses = resource_base.ResourceBase.__subclasses__() + resource_definitions.ResourceAngular.__subclasses__()
    for resource in subclasses:
        obj = resource(minify)
        all_resources[resource.RESOURCE_NAME] = dict(css=tuple(obj.resources_css), js=tuple(obj.resources_js))
    return all_resources
Find all resources which subclass ResourceBase. Keyword arguments: minify -- select minified resources if available. Returns: Dictionary of available resources. Keys are resource names (part of the config variable names), values are dicts with css and js keys, and tuples of resources as values.
def citedby_pid(self, pid, metaonly=False, from_heap=True):
    """Retrieve citedby documents from a given PID number.

    pid: SciELO PID number.
    metaonly: retrieve only the metadata of the citing documents,
        including the number of citations received.
    from_heap: read results from a preproduced report instead of
        fetching the API; much faster but possibly stale.
    """
    if from_heap is True:
        result = citations.raw_data(pid)
        # Strip the full citation list when only metadata was requested.
        if result and 'cited_by' in result and metaonly is True:
            del(result['cited_by'])
            return result
        if result:
            return result
    # Fall back to the remote API when the heap has nothing for this PID.
    url = urljoin(self.CITEDBY_URL, self.PID_ENDPOINT)
    params = {
        "q": pid,
        "metaonly": "true" if metaonly is True else "false"
    }
    result = self._do_request(url, params=params)
    return result
Retrieve citedby documents from a given PID number. pid: SciELO PID number metaonly: will retrieve only the metadata of the requested article citations including the number of citations it has received. from_heap: will retrieve the number of citations from a preproduced report, it will not fetch the api. Much faster results but not extremely updated.
def citedby_pid(self, pid, metaonly=False, from_heap=True):
    """Retrieve citedby documents from a given PID number.

    pid: SciELO PID number.
    metaonly: retrieve only the metadata of the citing documents,
        including the number of citations received.
    from_heap: read results from a preproduced report instead of
        querying the client; much faster but possibly stale.

    Returns the decoded JSON payload, or None if the client response
    is not valid JSON.
    """
    if from_heap is True:
        result = citations.raw_data(pid)
        # Strip the full citation list when only metadata was requested.
        if result and 'cited_by' in result and metaonly is True:
            del result['cited_by']
            return result
        if result:
            return result
    result = self.client.citedby_pid(pid, metaonly=metaonly)
    try:
        return json.loads(result)
    # Narrowed from a bare `except:` -- only decode failures should map
    # to None; anything else (KeyboardInterrupt, etc.) must propagate.
    except (TypeError, ValueError):
        return None
Retrieve citedby documents from a given PID number. pid: SciELO PID number metaonly: will retrieve only the metadata of the requested article citations including the number of citations it has received. from_heap: will retrieve the number of citations from a preproduced report; it will not fetch the API. Much faster results but not extremely up to date.
def search(self, dsl, params):
    """Free queries to the ES index.

    dsl (string): DSL query.
    params (list): [(key, value), ...] where key is a query parameter and
        value is its required value, e.g. [('size', '0'),
        ('search_type', 'count')].

    Returns the decoded JSON result, or None if it is not valid JSON.
    """
    query_parameters = []
    for key, value in params:
        query_parameters.append(self.CITEDBY_THRIFT.kwargs(str(key), str(value)))
    try:
        result = self.client.search(dsl, query_parameters)
    except self.CITEDBY_THRIFT.ServerError:
        raise ServerError('you may trying to run a bad DSL Query')
    try:
        return json.loads(result)
    # Narrowed from a bare `except:` -- only decode failures map to None.
    except (TypeError, ValueError):
        return None
Free queries to ES index. dsl (string): with DSL query params (list): [(key, value), (key, value)] where key is a query parameter, and value is the value required for parameter, ex: [('size', '0'), ('search_type', 'count')]
def raise_error(error):
    """Intake a dict of remote error information and raise a DashiError.

    error: dict with an optional 'exc_type' key; all keys are passed
        through to the exception constructor.
    """
    exc_type = error.get('exc_type')
    if exc_type and exc_type.startswith(ERROR_PREFIX):
        # Strip the wire-format prefix and map to a concrete exception class.
        exc_type = exc_type[len(ERROR_PREFIX):]
        exc_cls = ERROR_TYPE_MAP.get(exc_type, DashiError)
    else:
        exc_cls = DashiError
    raise exc_cls(**error)
Intakes a dict of remote error information and raises a DashiError
def fire(self, name, operation, args=None, **kwargs):
    """Send a message without waiting for a reply.

    @param name: name of destination service queue
    @param operation: name of service operation to invoke
    @param args: dictionary of keyword args to pass to operation.
        Use this OR kwargs.
    @param kwargs: additional args to pass to operation
    """
    if args:
        if kwargs:
            raise TypeError("specify args dict or keyword arguments, not both")
    else:
        args = kwargs

    d = dict(op=operation, args=args)
    headers = {'sender': self.add_sysname(self.name)}
    dest = self.add_sysname(name)

    def _fire(channel):
        # Publish within a Producer context so the producer is released
        # even when publish fails.
        with Producer(channel) as producer:
            producer.publish(d, routing_key=dest, headers=headers,
                             serializer=self._serializer,
                             exchange=self._exchange,
                             declare=[self._exchange])

    log.debug("sending message to %s", dest)
    with connections[self._pool_conn].acquire(block=True) as conn:
        # ensure() retries _fire across connection errors and returns the
        # channel it used, which we close explicitly.
        _, channel = self.ensure(conn, _fire)
        conn.maybe_close_channel(channel)
Send a message without waiting for a reply @param name: name of destination service queue @param operation: name of service operation to invoke @param args: dictionary of keyword args to pass to operation. Use this OR kwargs. @param kwargs: additional args to pass to operation
def handle(self, operation, operation_name=None, sender_kwarg=None):
    """Handle an operation using the specified function.

    @param operation: function to call for this operation
    @param operation_name: operation name. if unspecified operation.__name__ is used
    @param sender_kwarg: optional keyword arg on operation to feed in sender name
    """
    if not self._consumer:
        # Lazily create the consumer on the first registered operation.
        self._consumer = DashiConsumer(self, self._conn, self._name,
                                       self._exchange, sysname=self._sysname)
    self._consumer.add_op(operation_name or operation.__name__, operation,
                          sender_kwarg=sender_kwarg)
Handle an operation using the specified function @param operation: function to call for this operation @param operation_name: operation name. if unspecified operation.__name__ is used @param sender_kwarg: optional keyword arg on operation to feed in sender name
def cancel(self, block=True):
    """Cancel a call to consume() happening in another thread.

    This could take up to DashiConnection.consumer_timeout to complete.

    @param block: if True, waits until the consumer has returned
    """
    if self._consumer:
        self._consumer.cancel(block=block)
Cancel a call to consume() happening in another thread This could take up to DashiConnection.consumer_timeout to complete. @param block: if True, waits until the consumer has returned
def link_exceptions(self, custom_exception=None, dashi_exception=None):
    """Link a custom exception thrown on the receiver to a dashi exception.

    Both arguments are required; ValueError is raised when either is None.
    """
    for arg_name, arg_value in (("custom_exception", custom_exception),
                                ("dashi_exception", dashi_exception)):
        if arg_value is None:
            raise ValueError("%s must be set" % arg_name)
    self._linked_exceptions[custom_exception] = dashi_exception
Link a custom exception thrown on the receiver to a dashi exception
def ensure(self, connection, func, *args, **kwargs):
    """Perform an operation until success.

    Repeats in the face of connection errors, pursuant to retry policy.
    Returns a (result, channel) pair so the caller can close the channel.
    """
    channel = None
    while 1:
        try:
            if channel is None:
                channel = connection.channel()
            return func(channel, *args, **kwargs), channel
        except (connection.connection_errors, IOError):
            # notify registered error callback, then reconnect and retry
            self._call_errback()
        channel = self.connect(connection)
Perform an operation until success Repeats in the face of connection errors, pursuant to retry policy.
def re_tab(s):
    """Return a tabbed string from an expanded one.

    At each 8-column tab stop, if the text just before the stop is two or
    more spaces, the run of trailing spaces is collapsed into a tab.
    """
    l = []
    p = 0
    for i in range(8, len(s), 8):
        # collapse two or more spaces into a tab
        # (fixed: the comparison must be against TWO spaces -- a 2-char
        # slice can never equal a single-space string)
        if s[i - 2:i] == "  ":
            l.append(s[p:i].rstrip() + "\t")
            p = i
    if p == 0:
        return s
    else:
        l.append(s[p:])
        return "".join(l)
Return a tabbed string from an expanded one.
def read_next_line(self):
    """Read another line from the file."""
    next_line = self.file.readline()

    if not next_line or next_line[-1:] != '\n':
        # no newline on last line of file
        self.file = None
    else:
        # trim newline characters
        next_line = next_line[:-1]

    expanded = next_line.expandtabs()

    edit = urwid.Edit("", expanded, allow_tab=True)
    edit.set_edit_pos(0)
    # remember the untouched on-disk text so save_file can keep it verbatim
    edit.original_text = next_line
    self.lines.append(edit)

    return next_line
Read another line from the file.
def _get_at_pos(self, pos):
    """Return a (widget, position) pair for the line number passed.

    Returns (None, None) when *pos* is before the file start or past the
    end of a fully-read file.
    """
    if pos < 0:
        # line 0 is the start of the file, no more above
        return None, None
    if len(self.lines) > pos:
        # we have that line so return it
        return self.lines[pos], pos
    if self.file is None:
        # file is closed, so there are no more lines
        return None, None
    assert pos == len(self.lines), "out of order request?"
    # lazily read the next line into self.lines on demand
    self.read_next_line()
    return self.lines[-1], pos
Return a widget for the line number passed.
def split_focus(self):
    """Divide the focus edit widget at the cursor location."""
    focus = self.lines[self.focus]
    pos = focus.edit_pos
    # new widget takes the text after the cursor
    edit = urwid.Edit("", focus.edit_text[pos:], allow_tab=True)
    # mark as having no on-disk original, so save_file re-tabs it
    edit.original_text = ""
    focus.set_edit_text(focus.edit_text[:pos])
    edit.set_edit_pos(0)
    self.lines.insert(self.focus + 1, edit)
Divide the focus edit widget at the cursor location.
def combine_focus_with_prev(self):
    """Combine the focus edit widget with the one above."""
    above, ignore = self.get_prev(self.focus)
    if above is None:
        # already at the top
        return
    focus = self.lines[self.focus]
    # place the cursor at the join point before appending the text
    above.set_edit_pos(len(above.edit_text))
    above.set_edit_text(above.edit_text + focus.edit_text)
    del self.lines[self.focus]
    self.focus -= 1
Combine the focus edit widget with the one above.
def combine_focus_with_next(self):
    """Combine the focus edit widget with the one below."""
    below, ignore = self.get_next(self.focus)
    if below is None:
        # already at bottom
        return
    focus = self.lines[self.focus]
    focus.set_edit_text(focus.edit_text + below.edit_text)
    del self.lines[self.focus + 1]
Combine the focus edit widget with the one below.
def handle_keypress(self, k):
    """Last resort for keypresses."""
    if k == "esc":
        self.save_file()
        raise urwid.ExitMainLoop()
    elif k == "delete":
        # delete at end of line
        self.walker.combine_focus_with_next()
    elif k == "backspace":
        # backspace at beginning of line
        self.walker.combine_focus_with_prev()
    elif k == "enter":
        # start new line
        self.walker.split_focus()
        # move the cursor to the new line and reset pref_col
        # NOTE(review): `size` is not defined in this scope -- this branch
        # would raise NameError when reached; the caller's (maxcol, maxrow)
        # size tuple presumably needs to be passed in. Confirm upstream.
        self.view.keypress(size, "down")
        self.view.keypress(size, "home")
Last resort for keypresses.
def save_file(self):
    """Write the file out to disk.

    Keeps untouched lines verbatim, re-tabs edited lines, then appends
    any lines never read from the original file.
    """
    l = []
    walk = self.walker
    # collect the text already stored in edit widgets
    for edit in walk.lines:
        if edit.original_text.expandtabs() == edit.edit_text:
            l.append(edit.original_text)
        else:
            l.append(re_tab(edit.edit_text))
    # then the rest
    while walk.file is not None:
        l.append(walk.read_next_line())
    # write back to disk; `with` guarantees the handle is closed even on a
    # failed write (the original leaked it), and an empty buffer no longer
    # raises an uncaught StopIteration.
    with open(self.save_name, "w") as outfile:
        prefix = ""
        for line in l:
            outfile.write(prefix + line)
            prefix = "\n"
        # make sure the file ends with a newline
        if l and l[-1] != "\n":
            outfile.write("\n")
Write the file out to disk.
def _media(self):
    """Return a forms.Media instance with the basic editor media and
    media from all registered extensions.
    """
    css = ['markymark/css/markdown-editor.css']
    # icon library css is configurable; setting it to a falsy value
    # disables the default FontAwesome stylesheet
    iconlibrary_css = getattr(
        settings,
        'MARKYMARK_FONTAWESOME_CSS',
        'markymark/fontawesome/fontawesome.min.css'
    )
    if iconlibrary_css:
        css.append(iconlibrary_css)

    media = forms.Media(
        css={'all': css},
        js=('markymark/js/markdown-editor.js',)
    )

    # Use official extension loading to initialize all extensions
    # and hook in extension-defined media files.
    renderer = initialize_renderer()
    for extension in renderer.registeredExtensions:
        if hasattr(extension, 'media'):
            media += extension.media
    return media
Returns a forms.Media instance with the basic editor media and media from all registered extensions.
def rel(path, parent=None, par=False):
    """Take *path* and compute the path relative to *parent*.

    If *parent* is omitted, the current working directory is used. If
    *par* is True, a relative path is always created when possible.
    Otherwise, a relative path is only returned if *path* lives inside
    the *parent* directory.
    """
    try:
        res = os.path.relpath(path, parent)
    except ValueError:
        # Raised eg. on Windows for differing drive letters.
        if not par:
            # NOTE(review): `abs` here shadows the builtin -- presumably a
            # module-level alias for os.path.abspath; confirm at file top.
            return abs(path)
        raise
    else:
        if not par and not issub(res):
            return abs(path)
        return res
Takes *path* and computes the relative path from *parent*. If *parent* is omitted, the current working directory is used. If *par* is #True, a relative path is always created when possible. Otherwise, a relative path is only returned if *path* lives inside the *parent* directory.
def issub(path):
    """Return True if *path* is a relative path that does not point outside
    of its parent directory or equal its parent directory (thus, this also
    returns False for a path like ``./``).
    """
    if isabs(path):
        return False
    points_outside = (
        path.startswith(curdir + sep)
        or path.startswith(pardir + sep)
        or path in (curdir, pardir)
    )
    return not points_outside
Returns #True if *path* is a relative path that does not point outside of its parent directory or is equal to its parent directory (thus, this function will also return False for a path like `./`).
def addtobase(subject, base_suffix):
    """Add the string *base_suffix* to the basename of *subject*,
    keeping the extension in place.
    """
    if not base_suffix:
        return subject
    stem, extension = os.path.splitext(subject)
    return '{0}{1}{2}'.format(stem, base_suffix, extension)
Adds the string *base_suffix* to the basename of *subject*.
def addprefix(subject, prefix):
    """Add the specified *prefix* to the last path element in *subject*.

    If *prefix* is a callable, it must accept exactly one argument (the
    last path element) and return a modified value.
    """
    if not prefix:
        return subject
    head, tail = split(subject)
    tail = prefix(tail) if callable(prefix) else prefix + tail
    return join(head, tail)
Adds the specified *prefix* to the last path element in *subject*. If *prefix* is a callable, it must accept exactly one argument, which is the last path element, and return a modified value.
def addsuffix(subject, suffix, replace=False):
    """Add the specified *suffix* to *subject*.

    If *replace* is True, the old suffix is removed first. If *suffix* is
    callable, it must accept exactly one argument and return a modified
    value.
    """
    if not suffix and not replace:
        return subject
    result = rmvsuffix(subject) if replace else subject
    if suffix and callable(suffix):
        result = suffix(result)
    elif suffix:
        result = result + suffix
    return result
Adds the specified *suffix* to the *subject*. If *replace* is True, the old suffix will be removed first. If *suffix* is callable, it must accept exactly one argument and return a modified value.
def rmvsuffix(subject):
    """Remove the suffix (extension) from *subject*, if there is one."""
    dot = subject.rfind('.')
    # normalise separators so the check works for both / and \ paths
    last_sep = subject.replace('\\', '/').rfind('/')
    if dot > last_sep:
        return subject[:dot]
    return subject
Remove the suffix from *subject*.
def getsuffix(subject):
    """Return the suffix of a filename (without the dot).

    If the file has no suffix, returns None. Can return an empty string
    if the filename ends with a period.
    """
    dot = subject.rfind('.')
    last_sep = subject.replace('\\', '/').rfind('/')
    if dot <= last_sep:
        return None
    return subject[dot + 1:]
Returns the suffix of a filename. If the file has no suffix, returns None. Can return an empty string if the filename ends with a period.
def makedirs(path, exist_ok=True):
    """Like #os.makedirs(), with *exist_ok* defaulting to #True."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # swallow only "already exists" errors, and only when allowed
        if not (exist_ok and exc.errno == errno.EEXIST):
            raise
Like #os.makedirs(), with *exist_ok* defaulting to #True.
def chmod_update(flags, modstring):
    """Modify *flags* according to *modstring* (e.g. ``'u+x'``, ``'a-w'``).

    Group specifiers are ``u``/``g``/``o``/``a`` (default ``a``); each must
    be followed by ``+`` or ``-`` and one or more of ``r``/``w``/``x``.
    """
    perm_bits = {
        'r': (_stat.S_IRUSR, _stat.S_IRGRP, _stat.S_IROTH),
        'w': (_stat.S_IWUSR, _stat.S_IWGRP, _stat.S_IWOTH),
        'x': (_stat.S_IXUSR, _stat.S_IXGRP, _stat.S_IXOTH),
    }
    target = 'a'
    direction = None
    for ch in modstring:
        if ch in '+-':
            direction = ch
        elif ch in 'ugoa':
            target = ch
            direction = None  # Need a - or + after group specifier.
        elif ch in 'rwx' and direction in '+-':
            bits = perm_bits[ch]
            if target == 'a':
                mask = bits[0] | bits[1] | bits[2]
            else:
                mask = bits['ugo'.index(target)]
            if direction == '-':
                flags &= ~mask
            else:
                flags |= mask
        else:
            raise ValueError('invalid chmod: {!r}'.format(modstring))
    return flags
Modifies *flags* according to *modstring*.
def chmod_repr(flags):
    """Return an ls-style string representation (e.g. ``'rwxr-xr-x'``)
    of the access flags *flags*.
    """
    symbols = 'rwxrwxrwx'
    bit_order = (_stat.S_IRUSR, _stat.S_IWUSR, _stat.S_IXUSR,
                 _stat.S_IRGRP, _stat.S_IWGRP, _stat.S_IXGRP,
                 _stat.S_IROTH, _stat.S_IWOTH, _stat.S_IXOTH)
    chars = []
    for sym, bit in zip(symbols, bit_order):
        chars.append(sym if flags & bit else '-')
    return ''.join(chars)
Returns a string representation of the access flags *flags*.
def compare_timestamp(src, dst):
    """Compare the timestamps of files *src* and *dst*.

    Returns True if *dst* is out of date or does not exist. Raises an
    OSError if the *src* file does not exist.
    """
    try:
        dst_mtime = os.path.getmtime(dst)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        return True  # dst does not exist
    return os.path.getmtime(src) > dst_mtime
Compares the timestamps of file *src* and *dst*, returning #True if the *dst* is out of date or does not exist. Raises an #OSError if the *src* file does not exist.
def init_app(self, app):
    """Initialize the extension on a Flask application."""
    # Set default Flask config option.
    app.config.setdefault('STATICS_MINIFY', False)

    # Select resources.
    self.all_resources = ALL_RESOURCES_MINIFIED if app.config.get('STATICS_MINIFY') else ALL_RESOURCES
    self.all_variables = ALL_VARIABLES

    # Add this instance to app.extensions.
    if not hasattr(app, 'extensions'):
        app.extensions = dict()
    if 'statics' in app.extensions:
        raise ValueError('Already registered extension STATICS.')
    app.extensions['statics'] = _StaticsState(self, app)

    # Initialize blueprint.
    name = 'flask_statics_helper'
    static_url_path = '{0}/{1}'.format(app.static_url_path, name)
    self.blueprint = Blueprint(name, __name__, template_folder='templates', static_folder='static', static_url_path=static_url_path)
    # expose variables/resources to all templates via app-level globals
    self.blueprint.add_app_template_global(self.all_variables, '_flask_statics_helper_all_variables')
    self.blueprint.add_app_template_global(self.all_resources, '_flask_statics_helper_all_resources')
    app.register_blueprint(self.blueprint)
Initialize the extension.
def measure_board_rms(control_board, n_samples=10, sampling_ms=10,
                      delay_between_samples_ms=0):
    '''
    Read RMS voltage samples from control board high-voltage feedback circuit.

    Returns a data frame with columns ``board measured V`` and
    ``divider resistor index`` (empty on measurement error).

    Note: the source line contained a garbled duplicate of this function
    (``return datf measure_board_rms(...``); this is the single corrected
    definition.
    '''
    try:
        results = control_board.measure_impedance(n_samples, sampling_ms,
                                                  delay_between_samples_ms,
                                                  True, True, [])
    except RuntimeError:
        # `RuntimeError` may be raised if, for example, current limit was
        # reached during measurement.  In such cases, return an empty frame.
        logger.warning('Error encountered during high-voltage RMS '
                       'measurement.', exc_info=True)
        data = pd.DataFrame(None, columns=['board measured V',
                                           'divider resistor index'])
    else:
        data = pd.DataFrame({'board measured V': results.V_hv})
        data['divider resistor index'] = results.hv_resistor
    return data
Read RMS voltage samples from control board high-voltage feedback circuit.
def find_good(control_board, actuation_steps, resistor_index, start_index,
              end_index):
    '''
    Use a binary search over the range of provided actuation_steps to find the
    maximum actuation voltage that is measured by the board feedback circuit
    using the specified feedback resistor.

    Returns (lower, data): the best step index and the final measurement
    frame at that voltage.

    Note: the source line contained a garbled duplicate of this function
    (``return lower, datf find_good(...``); this is the single corrected
    definition.
    '''
    lower = start_index
    upper = end_index
    while lower < upper - 1:
        # Floor division keeps the midpoint a valid integer index
        # (plain `/` yields a float under Python 3).
        index = lower + (upper - lower) // 2
        v = actuation_steps[index]
        control_board.set_waveform_voltage(v)
        data = measure_board_rms(control_board)
        valid_data = data[data['divider resistor index'] >= 0]
        if (valid_data['divider resistor index'] < resistor_index).sum():
            # We have some measurements from another resistor.
            upper = index
        else:
            lower = index
    control_board.set_waveform_voltage(actuation_steps[lower])
    data = measure_board_rms(control_board)
    return lower, data
Use a binary search over the range of provided actuation_steps to find the maximum actuation voltage that is measured by the board feedback circuit using the specified feedback resistor.
def update_control_board_calibration(control_board, fitted_params):
    '''
    Update the control board with the specified fitted parameters.

    Note: the source line contained a garbled duplicate of this function
    (``.valuef update_control_board_calibration(...``); this is the single
    corrected definition.
    '''
    # Update the control board with the new fitted capacitor and resistor
    # values for the reference load analog input (channel 0).
    control_board.a0_series_resistance = fitted_params['fitted R'].values
    control_board.a0_series_capacitance = fitted_params['fitted C'].values
Update the control board with the specified fitted parameters.
def load(self):
    """Load each path in order.

    Remember paths already loaded and only load new ones, merging each
    file's data over the previously accumulated configuration.
    """
    data = self.dict_class()
    for path in self.paths:
        if path in self.paths_loaded:
            continue
        try:
            with open(path, 'r') as file:
                # SECURITY NOTE: yaml.load without an explicit Loader can
                # construct arbitrary Python objects; switch to
                # yaml.safe_load if these files are not fully trusted.
                path_data = yaml.load(file.read())
            data = dict_merge(data, path_data)
            self.paths_loaded.add(path)
        except IOError:
            # TODO: Log this correctly once logging is implemented
            if not path.endswith('.local.yml'):
                # print() call form works under both Python 2 and 3
                # (the original used a Python-2-only print statement)
                print('CONFIG NOT FOUND: %s' % (path))
    self.data = data
Load each path in order. Remember paths already loaded and only load new ones.
def _initialize(self, settings_module):
    """Initialize the settings from a given settings module.

    settings_module -- dotted path to the settings module.
    """
    # Get the global settings values and assign them as self attributes
    self.settings_list = []
    for setting in dir(global_settings):
        # Only get upper case settings
        if setting == setting.upper():
            setattr(self, setting, getattr(global_settings, setting))
            self.settings_list.append(setting)

    # If a settings module was passed in, import it, and grab settings from it.
    # Overwrite global settings with these.
    if settings_module is not None:
        self.SETTINGS_MODULE = settings_module

        # Try to import the settings module
        try:
            mod = import_module(self.SETTINGS_MODULE)
        except ImportError:
            error_message = "Could not import settings at {0}".format(self.SETTINGS_MODULE)
            log.exception(error_message)
            raise ImportError(error_message)

        # Grab uppercased settings and set them as self attrs
        for setting in dir(mod):
            if setting == setting.upper():
                # INSTALLED_APPS is additive instead of replacing the default
                if setting == "INSTALLED_APPS":
                    self.INSTALLED_APPS += getattr(mod, setting)
                else:
                    setattr(self, setting, getattr(mod, setting))
                self.settings_list.append(setting)

    # If PATH_SETTINGS is in the settings file, extend the system path to include it
    if hasattr(self, "PATH_SETTINGS"):
        for path in self.PATH_SETTINGS:
            sys.path.extend(getattr(self, path))

    # de-duplicate the recorded setting names
    self.settings_list = list(set(self.settings_list))
Initialize the settings from a given settings_module settings_module - path to settings module
def _setup(self): settings_module = None #Get the settings module from the environment variables try: settings_module = os.environ[global_settings.MODULE_VARIABLE] except KeyError: error_message = "Settings not properly configured. Cannot find the environment variable {0}".format(global_settings.MODULE_VARIABLE) log.exception(error_message) self._initialize(settings_module) self._configure_logging()
Perform initial setup of the settings class, such as getting the settings module and setting the settings
def _configure_logging(self): if not self.LOGGING_CONFIG: #Fallback to default logging in global settings if needed dictConfig(self.DEFAULT_LOGGING) else: dictConfig(self.LOGGING_CONFIG)
Setting up logging from logging config in settings
def ensure_context(**vars):
    """Ensure that a context is in the stack, creating one otherwise.

    Yields the active context (updated with *vars*); pops the context on
    exit only if this call pushed it.
    NOTE(review): this generator is presumably decorated with
    ``@contextmanager`` at the definition site -- confirm at file top.
    """
    ctx = _context_stack.top
    stacked = False
    if not ctx:
        ctx = Context()
        stacked = True
        _context_stack.push(ctx)
    ctx.update(vars)
    try:
        yield ctx
    finally:
        if stacked:
            _context_stack.pop()
Ensures that a context is in the stack, creates one otherwise.
def request_context(app, request):
    """Create a Context instance from the given request object."""
    vars = {}
    if request.view_args is not None:
        vars.update(request.view_args)
    vars.update({
        "request": request,
        "GET": AttrDict(request.args.to_dict()),
        "POST": AttrDict(request.form.to_dict()),
        "app": app,
        "config": app.config,
        "session": session,
        "g": g,
        "now": datetime.datetime.now,
        "utcnow": datetime.datetime.utcnow,
        "today": datetime.date.today})
    context = Context(vars)
    # let templates reach the context object itself
    context.vars["current_context"] = context
    return context
Creates a Context instance from the given request object
def clone(self, **override_vars):
    """Create a copy of this context, optionally overriding variables."""
    c = Context(self.vars, self.data)
    # copy so mutations of the clone's history don't affect the original
    c.executed_actions = set(self.executed_actions)
    c.vars.update(override_vars)
    return c
Creates a copy of this context
def mpl_get_cb_bound_below_plot(ax):
    """Return the coordinates for a colorbar axes below the provided axes
    object, taking into account changes of the axes due to aspect ratio
    settings.

    Parts of this code are taken from the transforms.py file from matplotlib.

    Important: use only AFTER fig.subplots_adjust(...).
    """
    position = ax.get_position()

    figW, figH = ax.get_figure().get_size_inches()
    fig_aspect = figH / figW
    box_aspect = ax.get_data_ratio()
    pb = position.frozen()
    pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect).bounds

    ax_size = ax.get_position().bounds
    # the colorbar is set to 0.01 width
    # NOTE(review): the comment says 0.01 but the height used below is
    # 0.03 -- confirm which is intended.
    sizes = [ax_size[0], ax_size[1] - 0.14, pb1[2], 0.03]

    return sizes
Return the coordinates for a colorbar axes below the provided axes object. Takes into account the changes of the axes due to aspect ratio settings. Parts of this code are taken from the transforms.py file from matplotlib. Important: use only AFTER fig.subplots_adjust(...). (A usage example follows in the original docstring.)
def main():
    """Generate an XLS document with fixed table content via DocRaptor.

    Runs in test mode (``"test": True``) and writes the result to
    ``test_basic.xls`` in the current directory.
    """
    # NOTE(review): internal whitespace of this HTML literal was mangled by
    # extraction; reconstructed with newlines -- confirm against upstream.
    table = """<table>
    <thead>
    <tr><th>First Name</th><th>Last Name</th></tr>
    </thead>
    <tbody>
    <tr><td>Paul</td><td>McGrath</td></tr>
    <tr><td>Liam</td><td>Brady</td></tr>
    <tr><td>John</td><td>Giles</td></tr>
    </tbody>
    </table>"""
    docraptor = DocRaptor()

    print("Create test_basic.xls")
    with open("test_basic.xls", "wb") as pdf_file:
        pdf_file.write(
            docraptor.create(
                {"document_content": table, "document_type": "xls", "test": True}
            ).content
        )
Generate an XLS with specified content.
from contextlib import contextmanager


@contextmanager
def restore_gc_state():
    """Restore the garbage collector state on leaving the with block.

    Saves whether collection is enabled and the current debug flags, and
    reinstates both on exit. (The ``@contextmanager`` decorator was lost
    in extraction; without it the bare generator cannot be used in a
    ``with`` statement as documented.)
    """
    old_isenabled = gc.isenabled()
    old_flags = gc.get_debug()
    try:
        yield
    finally:
        gc.set_debug(old_flags)
        (gc.enable if old_isenabled else gc.disable)()
Restore the garbage collector state on leaving the with block.
def response(self, parameters): r # get a config object self._set_parameters(parameters) terms = self.m / (1 + (1j * self.w * self.tau) ** self.c) # sum up terms specs = np.sum(terms, axis=1) ccomplex = self.sigmai * (1 - specs) response = sip_response.sip_response(self.f, ccomplex=ccomplex) return response
r"""Return the forward response in base dimensions :math:`\hat{\sigma }(\omega ) = \sigma _\infty \left(1 - \sum_i \frac {m_i}{1 + (j \omega \tau_i)^c_i}\right)` Parameters ---------- pars: Returns ------- response: Nx2 array, first axis denotes frequencies, seconds real and imaginary parts
def dre_dsigmai(self, pars):
    r"""Partial derivative of the real part of the response with respect
    to :math:`\sigma_\infty`.
    """
    self._set_parameters(pars)
    terms = self.m * self.num / self.denom
    specs = np.sum(terms, axis=1)
    result = 1 - specs
    return result
r""" :math:Add formula
def dre_dm(self, pars):
    r"""Partial derivative of the real part of the response with respect
    to the chargeabilities :math:`m_i`.
    """
    self._set_parameters(pars)
    terms = self.num / self.denom
    result = - self.sigmai * terms
    return result
r""" :math:Add formula
def dre_dtau(self, pars):
    r"""Partial derivative of the real part of the response with respect
    to the relaxation times :math:`\tau_i`.
    """
    self._set_parameters(pars)
    # term 1
    num1 = self.c * self.w * self.otc1 * np.cos(self.ang)
    term1 = num1 / self.denom
    # term 2
    num2a = self.otc * np.cos(self.ang)
    num2b = 1 + num2a
    denom2 = self.denom ** 2
    term2 = num2b / denom2
    # term 3
    term3 = 2 * self.c * self.w * self.otc1 * np.cos(self.ang) + self.otc2
    result = self.sigmai * self.m * (term1 + term2 * term3)
    return result
r""" :math:Add formula
def dre_dc(self, pars):
    r"""Partial derivative of the real part of the response with respect
    to the exponents :math:`c_i`.
    """
    self._set_parameters(pars)
    # term 1
    num1a = np.log(self.w * self.tau) * self.otc * np.sin(self.ang)
    num1b = self.otc * np.cos(self.ang) * np.pi / 2.0
    term1 = (num1a + num1b) / self.denom
    # term 2
    # NOTE(review): `np.sin(self.c / np.pi)` looks suspicious -- the other
    # derivatives use angles of the form c * pi / 2; confirm the formula.
    num2 = self.otc * np.sin(self.c / np.pi) * 2
    denom2 = self.denom ** 2
    term2 = num2 / denom2
    # term 3
    num3a = 2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
    num3b = 2 * ((self.w * self.tau) ** 2) * np.pi / 2.0 * np.sin(self.ang)
    num3c = 2 * np.log(self.w * self.tau) * self.otc2
    term3 = num3a - num3b + num3c
    result = self.sigmai * self.m * (term1 + term2 * term3)
    return result
r""" :math:Add formula
def dim_dsigmai(self, pars):
    r"""Partial derivative of the imaginary part of the response with
    respect to :math:`\sigma_\infty`.
    """
    self._set_parameters(pars)
    result = np.sum(- self.m * self.otc * np.sin(self.ang) / self.denom,
                    axis=1)
    return result
r""" :math:Add formula
def dim_dm(self, pars):
    r"""Partial derivative of the imaginary part of the response with
    respect to the chargeabilities :math:`m_i`.
    """
    self._set_parameters(pars)
    num1 = self.otc * np.sin(self.ang)
    result = -self.sigmai * num1 / self.denom
    return result
r""" :math:Add formula
def dim_dtau(self, pars):
    r"""Partial derivative of the imaginary part of the response with
    respect to the relaxation times :math:`\tau_i`.
    """
    self._set_parameters(pars)
    # term 1
    num1 = -self.m * (self.w ** self.c) * self.c \
        * (self.tau ** (self.c - 1)) * np.sin(self.ang)
    term1 = self.sigmai * num1 / self.denom
    # term 2
    num2a = -self.m * self.otc * np.sin(self.ang)
    num2b = 2 * (self.w ** 2.0) * self.c * (self.tau ** (self.c - 1)) * \
        np.cos(self.ang)
    num2c = 2 * self.c * (self.w ** (self.c * 2)) * \
        (self.tau ** (2 * self.c - 1))
    # NOTE(review): term1 scales with sigmai but term2 with sigma0 --
    # possibly inconsistent; confirm against the source formula.
    term2 = self.sigma0 * num2a * (num2b + num2c) / (self.denom ** 2)
    result = term1 + term2
    return result
r""" :math:Add formula
def dim_dc(self, pars):
    r"""Partial derivative of the imaginary part of the response with
    respect to the exponents :math:`c_i`.
    """
    self._set_parameters(pars)
    # term 1
    num1a = self.m * np.sin(self.ang) * np.log(self.w * self.tau) \
        * self.otc
    # NOTE(review): np.cos(np.pi / 2) is ~0, which zeroes num1b; the
    # intended factor was probably np.cos(self.ang) -- confirm.
    num1b = self.m * self.otc * np.pi / 2 * np.cos(np.pi / 2)
    term1 = self.sigma0 * (-num1a - num1b) / self.denom
    # term 2
    num2a = -self.m * self.otc * np.cos(self.ang)
    num2b = -2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang)
    num2c = 2 * self.otc * np.pi / 2 * np.cos(self.ang)
    num2d = 2 * np.log(self.w * self.tau) * self.otc2
    numerator = num2a * (num2b + num2c) + num2d
    term2 = self.sigma0 * numerator / (self.denom ** 2)
    result = term1 + term2
    return result
r""" :math:Add formula
def add_view_file_mapping(self, pattern, cls):
    """Add a mapping between a file and a view class.

    *pattern* can be an extension in the form ``.EXT``, a filename, a
    glob-style string containing ``*``, or a pre-compiled regexp.
    """
    if isinstance(pattern, str):
        if not pattern.endswith("*"):
            # remember plain extensions so matching files are recognised
            _, ext = os.path.splitext(pattern)
            self.allowed_extensions.add(ext)
        # translate the glob into an anchored, case-insensitive regexp
        escaped = re.escape(pattern).replace("\\*", ".+")
        pattern = re.compile("^" + escaped + "$", re.I)
    self.view_class_files_map.append((pattern, cls))
Adds a mapping between a file and a view class. Pattern can be an extension in the form .EXT or a filename.
def load_file(self, app, pathname, relpath, pypath):
    """Load a file and create a View from it.

    Files are split between a YAML front-matter and the content (unless
    it is a .yml file). Returns None when the file cannot be parsed as a
    declarative view.
    """
    try:
        view_class = self.get_file_view_cls(relpath)
        return create_view_from_file(pathname, source_template=relpath,
                                     view_class=view_class)
    except DeclarativeViewError:
        # not a declarative view file; silently skip it
        pass
Loads a file and creates a View from it. Files are split between a YAML front-matter and the content (unless it is a .yml file).
def get_file_view_cls(self, filename):
    """Return the view class associated with *filename*.

    Falls back to ``self.default_view_class`` when *filename* is None or
    no registered pattern matches.
    """
    if filename is not None:
        for pattern, cls in self.view_class_files_map:
            if pattern.match(filename):
                return cls
    return self.default_view_class
Returns the view class associated to a filename
def children(self, vertex):
    """Return the list of immediate children of the given vertex."""
    successors = []
    for edge in self.out_edges(vertex):
        successors.append(self.head(edge))
    return successors
Return the list of immediate children of the given vertex.
def parents(self, vertex):
    """Return the list of immediate parents of this vertex."""
    predecessors = []
    for edge in self.in_edges(vertex):
        predecessors.append(self.tail(edge))
    return predecessors
Return the list of immediate parents of this vertex.
def references(self):
    """Return (tail, head) pairs for each edge in the graph."""
    pairs = []
    for tail in self.vertices:
        for head in self.children(tail):
            pairs.append((tail, head))
    return pairs
Return (tail, head) pairs for each edge in the graph.
def descendants(self, start, generations=None):
    """Return the subgraph of all nodes reachable from the given start
    vertex, including that vertex.

    If specified, the optional `generations` argument limits how many
    generations (edge hops) are explored.
    """
    seen = self.vertex_set()
    seen.add(start)
    queue = deque([(start, 0)])
    while queue:
        vertex, depth = queue.popleft()
        # stop expanding once the generation limit is reached
        if depth == generations:
            continue
        for child in self.children(vertex):
            if child not in seen:
                seen.add(child)
                queue.append((child, depth + 1))
    return self.full_subgraph(seen)
Return the subgraph of all nodes reachable from the given start vertex, including that vertex. If specified, the optional `generations` argument specifies how many generations to limit to.
def ancestors(self, start, generations=None):
    """Return the subgraph of all nodes from which the given vertex is
    reachable, including that vertex.

    If specified, the optional `generations` argument limits how many
    generations (edge hops) are explored.
    """
    seen = self.vertex_set()
    seen.add(start)
    queue = deque([(start, 0)])
    while queue:
        vertex, depth = queue.popleft()
        # stop expanding once the generation limit is reached
        if depth == generations:
            continue
        for parent in self.parents(vertex):
            if parent not in seen:
                seen.add(parent)
                queue.append((parent, depth + 1))
    return self.full_subgraph(seen)
Return the subgraph of all nodes from which the given vertex is reachable, including that vertex. If specified, the optional `generations` argument specifies how many generations to limit to.
def source_components(self): raw_sccs = self._component_graph() # Construct a dictionary mapping each vertex to the root of its scc. vertex_to_root = self.vertex_dict() # And keep track of which SCCs have incoming edges. non_sources = self.vertex_set() # Build maps from vertices to roots, and identify the sccs that *are* # reachable from other components. for scc in raw_sccs: root = scc[0][1] for item_type, w in scc: if item_type == 'VERTEX': vertex_to_root[w] = root elif item_type == 'EDGE': non_sources.add(vertex_to_root[w]) sccs = [] for raw_scc in raw_sccs: root = raw_scc[0][1] if root not in non_sources: sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX']) return [self.full_subgraph(scc) for scc in sccs]
Return the strongly connected components not reachable from any other component. Any component in the graph is reachable from one of these.
def strongly_connected_components(self):
    """Return the list of strongly connected components of this graph,
    as a list of subgraphs.

    Algorithm is based on that described in "Path-based depth-first
    search for strong and biconnected components" by Harold N. Gabow,
    Inf.Process.Lett. 74 (2000) 107--114.
    """
    raw_sccs = self._component_graph()

    sccs = []
    for raw_scc in raw_sccs:
        # keep only the VERTEX records; EDGE records are bookkeeping
        sccs.append([v for vtype, v in raw_scc if vtype == 'VERTEX'])

    return [self.full_subgraph(scc) for scc in sccs]
Return list of strongly connected components of this graph. Returns a list of subgraphs. Algorithm is based on that described in "Path-based depth-first search for strong and biconnected components" by Harold N. Gabow, Inf.Process.Lett. 74 (2000) 107--114.
def save(self, *args, **kwargs):
    """Populate full_name, slug and uid before saving.

    **uid**: ``person:{slug}``
    """
    if not self.full_name:
        # NOTE(review): the format string has only {0}{1}{2} but FOUR
        # arguments are passed -- the suffix segment is silently dropped;
        # confirm whether '{0}{1}{2}{3}' was intended.
        self.full_name = '{0}{1}{2}'.format(
            self.first_name,
            '{}'.format(
                ' ' + self.middle_name + ' ' if self.middle_name else ' ',
            ),
            self.last_name,
            '{}'.format(' ' + self.suffix if self.suffix else '')
        )

    # slug is recomputed on every save from the (possibly new) full_name
    self.slug = uuslug(
        self.full_name,
        instance=self,
        max_length=100,
        separator='-',
        start_no=2
    )

    if not self.uid:
        self.uid = 'person:{}'.format(self.slug)

    super(Person, self).save(*args, **kwargs)
**uid**: :code:`person:{slug}`
def _prepare_orders(self, orders):
    """Fill every order's details with a default value, or None, in case
    those are not already filled.
    """
    for detail in PAYU_ORDER_DETAILS:
        # only backfill a detail when no order provides it at all
        if not any([detail in order for order in orders]):
            for order in orders:
                order[detail] = PAYU_ORDER_DETAILS_DEFAULTS.get(detail, None)

    return orders
Each order needs to have all it's details filled with default value, or None, in case those are not already filled.
def staticfiles_url_fetcher(url):
    """Return the file matching ``url``.

    Handles any URL resource that rendering HTML requires (images pointed
    to by ``img`` tags, stylesheets, etc.).  http(s) URLs are fetched
    normally via the default fetcher; relative URLs are resolved against
    the configured staticfiles storage (this mostly matters in
    development, but also works when static files are served under a
    relative url).

    Returns a dictionary with two entries: ``string``, the resource data
    as bytes, and ``mime_type``, the identified mime type.
    """
    if url.startswith('/'):
        base_url = staticfiles_storage.base_url
        filename = url.replace(base_url, '', 1)

        path = finders.find(filename)
        if path:
            # Found on disk via the staticfiles finders.  This matches
            # most cases; manifest static files with relative URLs only
            # show up here in DEBUG mode.
            with open(path, 'rb') as f:
                data = f.read()
        else:
            # Fall back to the storage backend (e.g. collected manifest
            # files when DEBUG = False; expects `collectstatic` to have
            # run).  Older Django versions don't support using this file
            # as a context manager, so use try/finally to guarantee the
            # handle is closed even if the read raises (previously it
            # leaked on a failed read).
            f = staticfiles_storage.open(filename)
            try:
                data = f.read()
            finally:
                f.close()

        return {
            'string': data,
            'mime_type': mimetypes.guess_type(url)[0],
        }
    else:
        return default_url_fetcher(url)
Returns the file matching url. This method will handle any URL resources that rendering HTML requires (eg: images pointed my ``img`` tags, stylesheets, etc). The default behaviour will fetch any http(s) files normally, and will also attempt to resolve staticfiles internally (this should mostly affect development scenarios, but also works if static files are served under a relative url). Returns a dictionary with two entries: ``string``, which is the resources data as a string and ``mime_type``, which is the identified mime type for the resource.
def render_pdf(
    template,
    file_,
    url_fetcher=staticfiles_url_fetcher,
    context=None,
):
    """Render ``template`` with ``context`` and write the PDF into ``file_``.

    ``file_`` may be any file-like object, including a Django Response,
    so this helper works both inside and outside of a request/response
    cycle.

    :param str template: Name of the Django template to render.
    :param file file_: File-like object (or Response) receiving the PDF.
    :param url_fetcher: Resource fetcher handed to WeasyPrint.
    :param dict context: Template context; defaults to empty.
    """
    rendered = get_template(template).render(context or {})
    document = HTML(
        string=rendered,
        base_url='not-used://',
        url_fetcher=url_fetcher,
    )
    document.write_pdf(target=file_)
Writes the PDF data into ``file_``. Note that ``file_`` can actually be a Django Response object as well. This function may be used as a helper that can be used to save a PDF file to a file (or anything else outside of a request/response cycle), eg:: :param str template: Name of the template to render. :param file file_: A file like object (or a Response) where to output the rendered PDF.
def encode_bytes(src_buf, dst_file):
    """Write a 2-byte length prefix followed by the buffer itself.

    Parameters
    ----------
    src_buf: bytes
        Source bytes to be encoded; asserts 0 <= len(src_buf) <= 2**16-1.
    dst_file: file
        File-like object with a ``write`` method.

    Returns
    -------
    int
        Number of bytes written to `dst_file` (len(src_buf) + 2).
    """
    if not isinstance(src_buf, bytes):
        raise TypeError('src_buf must by bytes.')

    buf_len = len(src_buf)
    assert 0 <= buf_len <= 2**16 - 1

    dst_file.write(FIELD_U16.pack(buf_len))
    dst_file.write(src_buf)
    return buf_len + 2
Encode a buffer length followed by the bytes of the buffer itself. Parameters ---------- src_buf: bytes Source bytes to be encoded. Function asserts that 0 <= len(src_buf) <= 2**16-1. dst_file: file File-like object with write method. Returns ------- int Number of bytes written to `dst_file`.
def decode_bytes(f):
    """Read a 2-byte unsigned length prefix, then that many payload bytes.

    Parameters
    ----------
    f: file
        File-like object with a ``read`` method.

    Raises
    ------
    UnderflowDecodeError
        When the stream ends before the prefix or the payload is complete.

    Returns
    -------
    int
        Number of bytes read from `f`.
    bytes
        Payload bytes decoded from `f`.
    """
    prefix = f.read(FIELD_U16.size)
    if len(prefix) < FIELD_U16.size:
        raise UnderflowDecodeError()
    (payload_len,) = FIELD_U16.unpack_from(prefix)

    payload = f.read(payload_len)
    if len(payload) < payload_len:
        raise UnderflowDecodeError()

    return FIELD_U16.size + payload_len, payload
Decode a buffer length from a 2-byte unsigned int then read the subsequent bytes. Parameters ---------- f: file File-like object with read method. Raises ------ UnderflowDecodeError When the end of stream is encountered before the end of the encoded bytes. Returns ------- int Number of bytes read from `f`. bytes Value bytes decoded from `f`.
def encode_utf8(s, f):
    """UTF-8 encode string `s` to file-like object `f` per the MQTT
    Version 3.1.1 specification, section 1.5.3.

    The maximum length for the encoded string is 2**16-1 (65535) bytes;
    an assertion error results if it is longer.

    Parameters
    ----------
    s: str
        String to be encoded.
    f: file
        File-like object.

    Returns
    -------
    int
        Number of bytes written to f (encoded length + 2).
    """
    encode = codecs.getencoder('utf8')
    encoded_str_bytes, num_encoded_chars = encode(s)
    num_encoded_str_bytes = len(encoded_str_bytes)
    assert 0 <= num_encoded_str_bytes <= 2**16 - 1
    num_encoded_bytes = num_encoded_str_bytes + 2

    # Write the 16-bit big-endian length prefix with a single pack call,
    # consistent with decode_utf8/encode_bytes, instead of manually
    # splitting the high and low bytes into two U8 writes.
    f.write(FIELD_U16.pack(num_encoded_str_bytes))
    f.write(encoded_str_bytes)

    return num_encoded_bytes
UTF-8 encodes string `s` to file-like object `f` according to the MQTT Version 3.1.1 specification in section 1.5.3. The maximum length for the encoded string is 2**16-1 (65535) bytes. An assertion error will result if the encoded string is longer. Parameters ---------- s: str String to be encoded. f: file File-like object. Returns ------- int Number of bytes written to f.
def decode_utf8(f):
    """Decode a length-prefixed utf-8 string as described in MQTT
    Version 3.1.1 section 1.5.3 line 177: a 16-bit unsigned length
    followed by the utf-8 encoded payload.

    Parameters
    ----------
    f: file
        File-like object with a ``read`` method.

    Raises
    ------
    UnderflowDecodeError
        When the stream ends before the string is complete.
    Utf8DecodeError
        When any code point in the utf-8 payload is invalid.

    Returns
    -------
    int
        Number of bytes consumed.
    str
        The string utf-8 decoded from ``f``.
    """
    len_buf = f.read(FIELD_U16.size)
    if len(len_buf) < FIELD_U16.size:
        raise UnderflowDecodeError()
    (num_utf8_bytes,) = FIELD_U16.unpack_from(len_buf)

    str_buf = f.read(num_utf8_bytes)
    if len(str_buf) < num_utf8_bytes:
        raise UnderflowDecodeError()

    decode = codecs.getdecoder('utf8')
    try:
        s, num_chars = decode(str_buf, 'strict')
    except UnicodeError as e:
        raise Utf8DecodeError(e)

    return FIELD_U16.size + num_utf8_bytes, s
Decode a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Parameters ---------- f: file File-like object with read method. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to decode the string. Utf8DecodeError When any code point in the utf-8 string is invalid. Returns ------- int Number of bytes consumed. str A string utf-8 decoded from ``f``.
def encode_varint(v, f):
    """Encode non-negative integer `v` to file `f` as a variable-length
    integer (7 value bits per byte, high bit set on all but the last).

    Parameters
    ----------
    v: int
        Integer v >= 0.
    f: file
        Object with a ``write`` method.

    Returns
    -------
    int
        Number of bytes written.
    """
    assert v >= 0
    written = 0
    while True:
        v, octet = divmod(v, 0x80)
        if v:
            # More bytes follow: set the continuation bit.
            octet |= 0x80
        f.write(FIELD_U8.pack(octet))
        written += 1
        if not v:
            return written
Encode integer `v` to file `f`. Parameters ---------- v: int Integer v >= 0. f: file Object containing a write method. Returns ------- int Number of bytes written.
def decode_varint(f, max_bytes=4):
    """Decode a variable-length integer, using an algorithm similar to
    that described in MQTT Version 3.1.1 line 297.

    Parameters
    ----------
    f: file
        Object with a ``read`` method.
    max_bytes: int or None
        If the varint cannot be constructed from `max_bytes` or fewer
        bytes of `f`, a `DecodeError` is raised.  None means no maximum.

    Raises
    -------
    DecodeError
        When the encoding is longer than `max_bytes`.
    UnderflowDecodeError
        When the file ends before the varint is complete.

    Returns
    -------
    int
        Number of bytes consumed.
    int
        Value extracted from `f`.
    """
    value = 0
    multiplier = 1
    num_bytes_consumed = 0
    while True:
        buf = f.read(1)
        if not buf:
            raise UnderflowDecodeError()
        (octet,) = FIELD_U8.unpack(buf)
        value += (octet & 0x7f) * multiplier
        multiplier *= 0x80
        num_bytes_consumed += 1
        if not (octet & 0x80):
            # Continuation bit clear: this was the final byte.
            break
        if max_bytes is not None and num_bytes_consumed >= max_bytes:
            raise DecodeError('Variable integer contained more than maximum bytes ({}).'.format(max_bytes))
    return num_bytes_consumed, value
Decode variable integer using algorithm similar to that described in MQTT Version 3.1.1 line 297. Parameters ---------- f: file Object with a read method. max_bytes: int or None If a varint cannot be constructed using `max_bytes` or fewer from f then raises a `DecodeError`. If None then there is no maximum number of bytes. Raises ------- DecodeError When length is greater than max_bytes. UnderflowDecodeError When file ends before enough bytes can be read to construct the varint. Returns ------- int Number of bytes consumed. int Value extracted from `f`.
def unpack(self, struct):
    """Read exactly ``struct.size`` bytes from the underlying stream and
    unpack them with ``struct``.

    Parameters
    ----------
    struct: struct.Struct
        Note: the parameter shadows the ``struct`` module name; only the
        object's ``size`` and ``unpack`` attributes are used.

    Raises
    ------
    UnderflowDecodeError
        Raised when the underlying stream cannot supply enough bytes.

    Returns
    -------
    tuple
        Tuple of extracted values.
    """
    return struct.unpack(self.read(struct.size))
Read as many bytes as are required to extract struct then unpack and return a tuple of the values. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to extract the bytes. Parameters ---------- struct: struct.Struct Returns ------- tuple Tuple of extracted values.
def unpack_utf8(self):
    """Extract a length-prefixed utf-8 string (MQTT Version 3.1.1
    section 1.5.3, line 177: 16-bit unsigned length then the encoded
    payload) from the underlying stream.

    Raises
    ------
    UnderflowDecodeError
        When the stream ends before the string is complete.
    DecodeError
        When any code point in the utf-8 payload is invalid.

    Returns
    -------
    int
        Number of bytes consumed.
    str
        The decoded string.
    """
    consumed, s = decode_utf8(self.__f)
    self.__num_bytes_consumed += consumed
    return consumed, s
Decode a utf-8 string encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by a utf-8 encoded string. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to decode the string. DecodeError When any code point in the utf-8 string is invalid. Returns ------- int Number of bytes consumed. str A string utf-8 decoded from the underlying stream.
def unpack_bytes(self):
    """Extract a length-prefixed bytes object (a 16-bit unsigned length
    followed by that many bytes) from the underlying stream.

    Returns
    -------
    int
        Number of bytes consumed.
    bytes
        The bytes object extracted from the underlying stream.
    """
    consumed, payload = decode_bytes(self.__f)
    self.__num_bytes_consumed += consumed
    return consumed, payload
Unpack a bytes object encoded as described in MQTT Version 3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length followed by that many bytes of payload. Returns ------- int Number of bytes consumed bytes A bytes object extracted from the underlying stream.
def unpack_varint(self, max_bytes):
    """Extract a variable-length integer (algorithm similar to MQTT
    Version 3.1.1 line 297) from the underlying stream.

    Parameters
    ----------
    max_bytes: int or None
        If a varint cannot be constructed from `max_bytes` or fewer
        bytes then a `DecodeError` is raised.  None means no maximum.

    Raises
    -------
    DecodeError
        When the encoding is longer than `max_bytes`.
    UnderflowDecodeError
        When the stream ends before the varint is complete.

    Returns
    -------
    int
        Number of bytes consumed.
    int
        The decoded value.
    """
    consumed, value = decode_varint(self.__f, max_bytes)
    self.__num_bytes_consumed += consumed
    return consumed, value
Decode variable integer using algorithm similar to that described in MQTT Version 3.1.1 line 297. Parameters ---------- max_bytes: int or None If a varint cannot be constructed using `max_bytes` or fewer from f then raises a `DecodeError`. If None then there is no maximum number of bytes. Raises ------- DecodeError When length is greater than max_bytes. UnderflowDecodeError When file ends before enough bytes can be read to construct the varint. Returns ------- int Number of bytes consumed. int Value extracted from `f`.
def read(self, num_bytes):
    """Read exactly `num_bytes` from the underlying stream and return them.

    Parameters
    ----------
    num_bytes : int
        Number of bytes to extract from the underlying stream.

    Raises
    ------
    UnderflowDecodeError
        Raised when the underlying stream yields fewer than `num_bytes`.

    Returns
    -------
    bytes
        A bytes object extracted from the underlying stream.
    """
    data = self.__f.read(num_bytes)
    assert len(data) <= num_bytes
    if len(data) != num_bytes:
        raise UnderflowDecodeError()
    self.__num_bytes_consumed += num_bytes
    return data
Read `num_bytes` and return them. Parameters ---------- num_bytes : int Number of bytes to extract from the underlying stream. Raises ------ UnderflowDecodeError Raised when a read failed to extract enough bytes from the underlying stream to extract the bytes. Returns ------- bytes A bytes object extracted from underlying stream.
def read(self, max_bytes=1):
    """Read at most `max_bytes` from the internal stream, honouring the
    optional byte limit.

    Parameters
    -----------
    max_bytes: int
        Maximum number of bytes to read; clamped so that no more than
        ``self.limit`` bytes are ever consumed in total (when a limit is
        set).

    Returns
    --------
    bytes
        Bytes extracted from the internal stream.  Length may be less
        than ``max_bytes``; at end-of-file a zero-length bytes object is
        returned.
    """
    if self.limit is not None:
        remaining = self.limit - self.__num_bytes_consumed
        if max_bytes > remaining:
            max_bytes = remaining
    chunk = self.__f.read(max_bytes)
    self.__num_bytes_consumed += len(chunk)
    return chunk
Read at most `max_bytes` from internal buffer. Parameters ----------- max_bytes: int Maximum number of bytes to read. Returns -------- bytes Bytes extracted from internal buffer. Length may be less than ``max_bytes``. On end-of file returns a bytes object with zero-length.
def read(self, max_bytes=1):
    """Read at most `max_bytes` from the internal buffer.

    Parameters
    -----------
    max_bytes: int
        Maximum number of bytes to read.

    Raises
    ------
    ValueError
        If read is called after close has been called.

    Returns
    --------
    bytes
        Bytes extracted from the internal buffer.  Length may be less
        than `max_bytes`; at end-of-file a zero-length bytes object is
        returned.
    """
    pos = self.__num_bytes_consumed
    if pos is None:
        raise ValueError('I/O operation on closed file.')

    available = len(self.__buf) - pos
    if max_bytes >= available:
        max_bytes = available

    chunk = self.__buf[pos:pos + max_bytes]
    self.__num_bytes_consumed = pos + max_bytes

    # Normalize bytearray slices so callers always get immutable bytes.
    if isinstance(chunk, bytearray):
        chunk = bytes(chunk)
    assert isinstance(chunk, bytes)
    return chunk
Read at most `max_bytes` from internal buffer. Parameters ----------- max_bytes: int Maximum number of bytes to read. Raises ------ ValueError If read is called after close has been called. Returns -------- bytes Bytes extracted from internal buffer. Length may be less than `max_bytes`. On end-of file returns a bytes object with zero-length.
def timeout(self, value):
    """Specify a timeout on the search query.

    Stores the value under ``self.params['timeout']``, creating the
    params dict if needed.  Returns self so calls can be chained.
    """
    if not self.params:
        self.params = {}
    self.params['timeout'] = value
    return self
Specifies a timeout on the search query
def filtered(self, efilter):
    """Apply a filter to the search.

    The first call creates ``self.params['filter']``; subsequent calls
    merge into the existing filter via ``dict.update``.  Returns self
    for chaining.  (Also replaces the Python-2-only ``has_key`` with the
    ``in`` operator.)
    """
    if not self.params:
        self.params = {'filter': efilter}
    elif 'filter' not in self.params:
        self.params['filter'] = efilter
    else:
        self.params['filter'].update(efilter)
    return self
Applies a filter to the search
def size(self, value):
    """Set the number of hits to return (server default is 10).

    Returns self for chaining.
    """
    if not self.params:
        self.params = {}
    self.params['size'] = value
    return self
The number of hits to return. Defaults to 10
def from_offset(self, value):
    """Set the starting index of the hits to return (server default 0).

    Stored under the 'from' key.  Returns self for chaining.
    """
    if not self.params:
        self.params = {}
    self.params['from'] = value
    return self
The starting from index of the hits to return. Defaults to 0.
def sort(self, *args, **kwargs):
    """Add one or more sorts on specific fields.

    http://www.elasticsearch.org/guide/reference/api/search/sort.html

    Positional arguments sort ascending; keyword arguments map a field
    name to an explicit order ('asc' or 'desc').  The special field
    '_score' sorts by score.  Each call replaces any previous sort
    specification.  Returns self for chaining.  (Also replaces the
    Python-2-only ``iteritems`` with ``items``.)
    """
    if not self.params:
        self.params = {}
    sorts = list(args)
    for field, order in kwargs.items():
        sorts.append({field: order})
    self.params['sort'] = sorts
    return self
http://www.elasticsearch.org/guide/reference/api/search/sort.html Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for _score to sort by score. standard arguments are ordered ascending, keyword arguments are fields and you specify the order either asc or desc
def sorted(self, fsort):
    """Set the sort specification for the search.

    Sorts are defined per field, with the special field name '_score'
    sorting by score.  Returns self for chaining.
    """
    if not self.params:
        self.params = {}
    self.params['sort'] = fsort
    return self
Allows to add one or more sort on specific fields. Each sort can be reversed as well. The sort is defined on a per field level, with special field name for _score to sort by score.
def search_simple(self, index, itype, key, search_term):
    """Run a simple query-string search against one index/type.

    Usage:
        > es = ElasticSearch()
        > es.search_simple('twitter', 'users', 'name', 'kim')
    """
    url = 'http://%s:%s/%s/%s/_search?q=%s:%s' % (
        self.host, self.port, index, itype, key, search_term)
    return self.session.get(url)
ElasticSearch.search_simple(index,itype,key,search_term) Usage: > es = ElasticSearch() > es.search_simple('twitter','users','name','kim')
def search_advanced(self, index, itype, query):
    """Advanced search against one index/type using the given query body.

    > query = ElasticQuery().term(user='kimchy')
    > ElasticSearch().search_advanced('twitter', 'posts', query)
      ... Search results ...
    """
    url = 'http://%s:%s/%s/%s/_search' % (
        self.host, self.port, index, itype)
    if self.params:
        query_header = dict(query=query, **self.params)
    else:
        query_header = dict(query=query)
    if self.verbose:
        print(query_header)
    return self.session.post(url, query_header)
Advanced search interface using specified query > query = ElasticQuery().term(user='kimchy') > ElasticSearch().search_advanced('twitter','posts',query) ... Search results ...
def doc_create(self, index, itype, value):
    """Create a document under the given index and type."""
    url = 'http://%s:%s/%s/%s/' % (self.host, self.port, index, itype)
    if self.verbose:
        print(value)
    return self.session.post(url, value)
Creates a document
def search_index_simple(self, index, key, search_term):
    """Search an entire index using a simple key and search term.

    @param index Name of the index
    @param key Search key
    @param search_term The term to be searched for
    """
    url = 'http://%s:%s/%s/_search?q=%s:%s' % (
        self.host, self.port, index, key, search_term)
    return self.session.get(url)
Search the index using a simple key and search_term @param index Name of the index @param key Search Key @param search_term The term to be searched for
def search_index_advanced(self, index, query):
    """Run an advanced search query against an entire index.

    > query = ElasticQuery().query_string(query='imchi')
    > search = ElasticSearch()
    """
    url = 'http://%s:%s/%s/_search' % (self.host, self.port, index)
    if self.params:
        content = dict(query=query, **self.params)
    else:
        content = dict(query=query)
    if self.verbose:
        print(content)
    return self.session.post(url, content)
Advanced search query against an entire index > query = ElasticQuery().query_string(query='imchi') > search = ElasticSearch()
def index_create(self, index, number_of_shards=5, number_of_replicas=1):
    """Create the specified index.

    > search = ElasticSearch()
    > search.index_create('twitter')
    {"ok":true,"acknowledged":true}
    """
    content = {'settings': dict(number_of_shards=number_of_shards,
                                number_of_replicas=number_of_replicas)}
    if self.verbose:
        print(content)
    url = 'http://%s:%s/%s' % (self.host, self.port, index)
    return self.session.put(url, content)
Creates the specified index > search = ElasticSearch() > search.index_create('twitter') {"ok":true,"acknowledged":true}
def index_delete(self, index):
    """Delete the specified index.

    > search = ElasticSearch()
    > search.index_delete('twitter')
    {"ok" : True, "acknowledged" : True }
    """
    url = 'http://%s:%s/%s' % (self.host, self.port, index)
    return self.session.delete(url)
Deletes the specified index > search = ElasticSearch() > search.index_delete('twitter') {"ok" : True, "acknowledged" : True }
def index_open(self, index):
    """Open the specified index.

    http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html

    > ElasticSearch().index_open('my_index')
    """
    url = 'http://%s:%s/%s/_open' % (self.host, self.port, index)
    return self.session.post(url, None)
Opens the specified index. http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html > ElasticSearch().index_open('my_index')
def river_couchdb_delete(self, index_name):
    """Delete the CouchDB river for the specified index.

    https://github.com/elasticsearch/elasticsearch-river-couchdb

    WARNING: this does NOT delete the index itself, only the river, so
    the only effect is that the index stops polling CouchDB for updates.
    """
    url = 'http://%s:%s/_river/%s' % (self.host, self.port, index_name)
    return self.session.delete(url)
https://github.com/elasticsearch/elasticsearch-river-couchdb Delete's a river for the specified index WARNING: It DOES NOT delete the index, only the river, so the only effects of this are that the index will no longer poll CouchDB for updates.
def index_list(self):
    """List index names, or return the raw response on a non-200 status."""
    request = self.session
    url = 'http://%s:%s/_cluster/state/' % (self.host, self.port)
    response = request.get(url)
    if request.status_code == 200:
        # NOTE(review): the status code is read from the session object,
        # which suggests the custom session wrapper records the last
        # response's status — confirm this is intended rather than
        # response.status_code.
        return response.get('metadata', {}).get('indices', {}).keys()
    return response
Lists indices
def map(self, index_name, index_type, map_value):
    """Enable a specific mapping for an index and type."""
    url = 'http://%s:%s/%s/%s/_mapping' % (
        self.host, self.port, index_name, index_type)
    content = {index_type: {'properties': map_value}}
    if self.verbose:
        print(content)
    return self.session.put(url, content)
Enable a specific map for an index and type
def list_types(index_name, host='localhost', port='9200'):
    """List the content types available in an index."""
    return ElasticSearch(host=host, port=port).type_list(index_name)
Lists the context types available in an index
def type_list(self, index_name):
    """List the types available in an index, or return the raw response
    on a non-200 status."""
    request = self.session
    url = 'http://%s:%s/%s/_mapping' % (self.host, self.port, index_name)
    response = request.get(url)
    if request.status_code == 200:
        # NOTE(review): status is read from the session wrapper, which
        # appears to record the last response's status — confirm.
        return response[index_name].keys()
    return response
List the types available in an index
def raw(self, module, method='GET', data=None):
    """Submit or request raw input against an arbitrary endpoint path.

    `module` is appended to the server base URL; `method` selects the
    HTTP verb.  Returns the response, or an error dict for an unknown
    method.
    """
    url = 'http://%s:%s/%s' % (self.host, self.port, module)
    if self.verbose:
        print(data)
    if method == 'GET':
        return self.session.get(url)
    if method == 'POST':
        return self.session.post(url, data)
    if method == 'PUT':
        return self.session.put(url, data)
    if method == 'DELETE':
        return self.session.delete(url)
    return {'error': 'No such request method %s' % method}
Submits or requests raw input