Columns: code (string, lengths 75 to 104k) and docstring (string, lengths 1 to 46.9k).
def marker(self, marker_name=None, label=None, color=None, retina=False): """Returns a single marker image without any background map. Parameters ---------- marker_name : str The marker's shape and size. label : str, optional The marker's alphanumeric label. Options are a through z, 0 through 99, or the name of a valid Maki icon. color : str, optional The marker's color. Options are three- or six-digit hexadecimal color codes. retina : bool, optional The marker's scale, where True indicates Retina scale (double scale) and False indicates regular scale. The default value is false. Returns ------- request.Response The response object with the specified marker. """ # Check for marker_name. if marker_name is None: raise ValidationError( "marker_name is a required argument" ) # Validate marker_name and retina. marker_name = self._validate_marker_name(marker_name) retina = self._validate_retina(retina) # Create dict and start building URI resource path. path_values = dict( marker_name=marker_name ) path_part = "/marker/{marker_name}" # Validate label, update dict, # and continue building URI resource path. if label is not None: label = self._validate_label(label) path_values["label"] = label path_part += "-{label}" # Validate color, update dict, # and continue building URI resource path. if color is not None: color = self._validate_color(color) path_values["color"] = color path_part += "+{color}" uri = URITemplate(self.base_uri + path_part).expand(**path_values) # Finish building URI resource path. path_part = "{}.png".format(retina) uri += path_part # Send HTTP GET request. response = self.session.get(uri) self.handle_http_error(response) return response
Returns a single marker image without any background map. Parameters ---------- marker_name : str The marker's shape and size. label : str, optional The marker's alphanumeric label. Options are a through z, 0 through 99, or the name of a valid Maki icon. color : str, optional The marker's color. Options are three- or six-digit hexadecimal color codes. retina : bool, optional The marker's scale, where True indicates Retina scale (double scale) and False indicates regular scale. The default value is false. Returns ------- request.Response The response object with the specified marker.
def logical_xor(self, other):
    """logical_xor(t) = self(t) ^ other(t)."""
    return self.operation(other, lambda x, y: int(bool(x) ^ bool(y)))
logical_xor(t) = self(t) ^ other(t).
def process_raw_file(self, raw_file_name, field_names):
    """
    Takes the filename to be read and uses the maps set up on class
    instantiation to process the file. This is a top-level function and
    uses self.maps, which should be the column descriptions (in order).
    """
    dist_vals = []
    group_dat = []
    events = []
    with open(raw_file_name) as csvfile:
        reader = csv.DictReader(csvfile, fieldnames=field_names)
        for num_lines, row in enumerate(reader):
            for col_num, fld in enumerate(field_names):
                try:
                    if self.maps[col_num].val == 'group_distinct':
                        group_dat.append(str(row[fld]))
                    elif self.maps[col_num].val == 'event_date':
                        events.append(str(row[fld]))
                except Exception as ex:
                    print("parsing error - shouldn't really be splitting using a comma anyway!", str(ex))
    dist_vals = sorted(list(set(group_dat)))
    return num_lines, dist_vals, group_dat, sorted(list(set(events)))
Takes the filename to be read and uses the maps set up on class instantiation to process the file. This is a top-level function and uses self.maps, which should be the column descriptions (in order).
def gmres_mgs(A, b, x0=None, tol=1e-5, restrt=None, maxiter=None, xtype=None, M=None, callback=None, residuals=None, reorth=False): """Generalized Minimum Residual Method (GMRES) based on MGS. GMRES iteratively refines the initial solution guess to the system Ax = b Modified Gram-Schmidt version Parameters ---------- A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by the norm of the initial preconditioned residual restrt : None, int - if int, restrt is max number of inner iterations and maxiter is the max number of outer iterations - if None, do not restart GMRES, and max number of inner iterations is maxiter maxiter : None, int - if restrt is None, maxiter is the max number of inner iterations and GMRES does not restart - if restrt is int, maxiter is the max number of outer iterations, and restrt is the max number of inner iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals contains the preconditioned residual norm history, including the initial residual. reorth : boolean If True, then a check is made whether to re-orthogonalize the Krylov space each GMRES iteration Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of gmres == ============================================= 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. This value is precisely the order of the Krylov space. <0 numerical breakdown, or illegal input == ============================================= Notes ----- - The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. - For robustness, modified Gram-Schmidt is used to orthogonalize the Krylov Space Givens Rotations are used to provide the residual norm each iteration Examples -------- >>> from pyamg.krylov import gmres >>> from pyamg.util.linalg import norm >>> import numpy as np >>> from pyamg.gallery import poisson >>> A = poisson((10,10)) >>> b = np.ones((A.shape[0],)) >>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs') >>> print norm(b - A*x) >>> 6.5428213057 References ---------- .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003 http://www-users.cs.umn.edu/~saad/books.html .. [2] C. T. 
Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html """ # Convert inputs to linear system, with error checking A, M, x, b, postprocess = make_system(A, M, x0, b) dimen = A.shape[0] # Ensure that warnings are always reissued from this function import warnings warnings.filterwarnings('always', module='pyamg\.krylov\._gmres_mgs') # Choose type if not hasattr(A, 'dtype'): Atype = upcast(x.dtype, b.dtype) else: Atype = A.dtype if not hasattr(M, 'dtype'): Mtype = upcast(x.dtype, b.dtype) else: Mtype = M.dtype xtype = upcast(Atype, x.dtype, b.dtype, Mtype) if restrt is not None: restrt = int(restrt) if maxiter is not None: maxiter = int(maxiter) # Get fast access to underlying BLAS routines # dotc is the conjugate dot, dotu does no conjugation [lartg] = get_lapack_funcs(['lartg'], [x]) if np.iscomplexobj(np.zeros((1,), dtype=xtype)): [axpy, dotu, dotc, scal] =\ get_blas_funcs(['axpy', 'dotu', 'dotc', 'scal'], [x]) else: # real type [axpy, dotu, dotc, scal] =\ get_blas_funcs(['axpy', 'dot', 'dot', 'scal'], [x]) # Make full use of direct access to BLAS by defining own norm def norm(z): return np.sqrt(np.real(dotc(z, z))) # Should norm(r) be kept if residuals == []: keep_r = True else: keep_r = False # Set number of outer and inner iterations if restrt: if maxiter: max_outer = maxiter else: max_outer = 1 if restrt > dimen: warn('Setting number of inner iterations (restrt) to maximum\ allowed, which is A.shape[0] ') restrt = dimen max_inner = restrt else: max_outer = 1 if maxiter > dimen: warn('Setting number of inner iterations (maxiter) to maximum\ allowed, which is A.shape[0] ') maxiter = dimen elif maxiter is None: maxiter = min(dimen, 40) max_inner = maxiter # Is this a one dimensional matrix? if dimen == 1: entry = np.ravel(A*np.array([1.0], dtype=xtype)) return (postprocess(b/entry), 0) # Prep for method r = b - np.ravel(A*x) # Apply preconditioner r = np.ravel(M*r) normr = norm(r) if keep_r: residuals.append(normr) # Check for nan, inf # if isnan(r).any() or isinf(r).any(): # warn('inf or nan after application of preconditioner') # return(postprocess(x), -1) # Check initial guess ( scaling by b, if b != 0, # must account for case when norm(b) is very small) normb = norm(b) if normb == 0.0: normb = 1.0 if normr < tol*normb: return (postprocess(x), 0) # Scale tol by ||r_0||_2, we use the preconditioned residual # because this is left preconditioned GMRES. if normr != 0.0: tol = tol*normr # Use separate variable to track iterations. If convergence fails, we # cannot simply report niter = (outer-1)*max_outer + inner. Numerical # error could cause the inner loop to halt while the actual ||r|| > tol. niter = 0 # Begin GMRES for outer in range(max_outer): # Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space # Space required is O(dimen*max_inner). # NOTE: We are dealing with row-major matrices, so we traverse in a # row-major fashion, # i.e., H and V's transpose is what we store. Q = [] # Givens Rotations # Upper Hessenberg matrix, which is then # converted to upper tri with Givens Rots H = np.zeros((max_inner+1, max_inner+1), dtype=xtype) V = np.zeros((max_inner+1, dimen), dtype=xtype) # Krylov Space # vs store the pointers to each column of V. # This saves a considerable amount of time. 
vs = [] # v = r/normr V[0, :] = scal(1.0/normr, r) vs.append(V[0, :]) # This is the RHS vector for the problem in the Krylov Space g = np.zeros((dimen,), dtype=xtype) g[0] = normr for inner in range(max_inner): # New Search Direction v = V[inner+1, :] v[:] = np.ravel(M*(A*vs[-1])) vs.append(v) normv_old = norm(v) # Check for nan, inf # if isnan(V[inner+1, :]).any() or isinf(V[inner+1, :]).any(): # warn('inf or nan after application of preconditioner') # return(postprocess(x), -1) # Modified Gram Schmidt for k in range(inner+1): vk = vs[k] alpha = dotc(vk, v) H[inner, k] = alpha v[:] = axpy(vk, v, dimen, -alpha) normv = norm(v) H[inner, inner+1] = normv # Re-orthogonalize if (reorth is True) and (normv_old == normv_old + 0.001*normv): for k in range(inner+1): vk = vs[k] alpha = dotc(vk, v) H[inner, k] = H[inner, k] + alpha v[:] = axpy(vk, v, dimen, -alpha) # Check for breakdown if H[inner, inner+1] != 0.0: v[:] = scal(1.0/H[inner, inner+1], v) # Apply previous Givens rotations to H if inner > 0: apply_givens(Q, H[inner, :], inner) # Calculate and apply next complex-valued Givens Rotation # ==> Note that if max_inner = dimen, then this is unnecessary # for the last inner # iteration, when inner = dimen-1. if inner != dimen-1: if H[inner, inner+1] != 0: [c, s, r] = lartg(H[inner, inner], H[inner, inner+1]) Qblock = np.array([[c, s], [-np.conjugate(s), c]], dtype=xtype) Q.append(Qblock) # Apply Givens Rotation to g, # the RHS for the linear system in the Krylov Subspace. g[inner:inner+2] = np.dot(Qblock, g[inner:inner+2]) # Apply effect of Givens Rotation to H H[inner, inner] = dotu(Qblock[0, :], H[inner, inner:inner+2]) H[inner, inner+1] = 0.0 niter += 1 # Don't update normr if last inner iteration, because # normr is calculated directly after this loop ends. if inner < max_inner-1: normr = np.abs(g[inner+1]) if normr < tol: break # Allow user access to the iterates if callback is not None: callback(x) if keep_r: residuals.append(normr) # end inner loop, back to outer loop # Find best update to x in Krylov Space V. Solve inner x inner system. y = sp.linalg.solve(H[0:inner+1, 0:inner+1].T, g[0:inner+1]) update = np.ravel(np.mat(V[:inner+1, :]).T*y.reshape(-1, 1)) x = x + update r = b - np.ravel(A*x) # Apply preconditioner r = np.ravel(M*r) normr = norm(r) # Check for nan, inf # if isnan(r).any() or isinf(r).any(): # warn('inf or nan after application of preconditioner') # return(postprocess(x), -1) # Allow user access to the iterates if callback is not None: callback(x) if keep_r: residuals.append(normr) # Has GMRES stagnated? indices = (x != 0) if indices.any(): change = np.max(np.abs(update[indices] / x[indices])) if change < 1e-12: # No change, halt return (postprocess(x), -1) # test for convergence if normr < tol: return (postprocess(x), 0) # end outer loop return (postprocess(x), niter)
Generalized Minimum Residual Method (GMRES) based on MGS. GMRES iteratively refines the initial solution guess to the system Ax = b Modified Gram-Schmidt version Parameters ---------- A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by the norm of the initial preconditioned residual restrt : None, int - if int, restrt is max number of inner iterations and maxiter is the max number of outer iterations - if None, do not restart GMRES, and max number of inner iterations is maxiter maxiter : None, int - if restrt is None, maxiter is the max number of inner iterations and GMRES does not restart - if restrt is int, maxiter is the max number of outer iterations, and restrt is the max number of inner iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals contains the preconditioned residual norm history, including the initial residual. reorth : boolean If True, then a check is made whether to re-orthogonalize the Krylov space each GMRES iteration Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of gmres == ============================================= 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. This value is precisely the order of the Krylov space. <0 numerical breakdown, or illegal input == ============================================= Notes ----- - The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. - For robustness, modified Gram-Schmidt is used to orthogonalize the Krylov Space Givens Rotations are used to provide the residual norm each iteration Examples -------- >>> from pyamg.krylov import gmres >>> from pyamg.util.linalg import norm >>> import numpy as np >>> from pyamg.gallery import poisson >>> A = poisson((10,10)) >>> b = np.ones((A.shape[0],)) >>> (x,flag) = gmres(A,b, maxiter=2, tol=1e-8, orthog='mgs') >>> print norm(b - A*x) >>> 6.5428213057 References ---------- .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 151-172, pp. 272-275, 2003 http://www-users.cs.umn.edu/~saad/books.html .. [2] C. T. Kelley, http://www4.ncsu.edu/~ctk/matlab_roots.html
def logout(config):
    """Remove and forget your Bugzilla credentials"""
    state = read(config.configfile)
    if state.get("BUGZILLA"):
        remove(config.configfile, "BUGZILLA")
        success_out("Forgotten")
    else:
        error_out("No stored Bugzilla credentials")
Remove and forget your Bugzilla credentials
def flatten(l, types=(list, )):
    """
    Given a list/tuple that potentially contains nested lists/tuples of
    arbitrary nesting, flatten into a single dimension. In other words,
    turn [(5, 6, [8, 3]), 2, [2, 1, (3, 4)]] into [5, 6, 8, 3, 2, 2, 1, 3, 4]

    This is safe to call on something not a list/tuple - the original input
    is returned as a list
    """
    # For backwards compatibility, this returned a list, not an iterable.
    # Changing to return an iterable could break things.
    if not isinstance(l, types):
        return l
    return list(flattened_iterator(l, types))
Given a list/tuple that potentially contains nested lists/tuples of arbitrary nesting, flatten into a single dimension. In other words, turn [(5, 6, [8, 3]), 2, [2, 1, (3, 4)]] into [5, 6, 8, 3, 2, 2, 1, 3, 4] This is safe to call on something not a list/tuple - the original input is returned as a list
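A quick usage sketch, assuming flatten (and its flattened_iterator helper) is importable from the module above; note that tuples are only flattened when included in types:

nested = [(5, 6, [8, 3]), 2, [2, 1, (3, 4)]]
print(flatten(nested, types=(list, tuple)))   # [5, 6, 8, 3, 2, 2, 1, 3, 4]
print(flatten("not a list"))                  # non-list input is returned unchanged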
def Percentile(pmf, percentage):
    """Computes a percentile of a given Pmf.

    percentage: float 0-100
    """
    p = percentage / 100.0
    total = 0
    for val, prob in pmf.Items():
        total += prob
        if total >= p:
            return val
Computes a percentile of a given Pmf. percentage: float 0-100
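A minimal sketch of a call; the SimplePmf class below is a hypothetical stand-in that only mimics the Items() interface Percentile relies on:

class SimplePmf:
    """Hypothetical stand-in exposing the Items() interface Percentile expects."""
    def __init__(self, d):
        self.d = d
    def Items(self):
        # Iterate (value, probability) pairs in sorted value order.
        return sorted(self.d.items())

pmf = SimplePmf({1: 0.2, 2: 0.3, 3: 0.5})
print(Percentile(pmf, 50))   # 2, since the cumulative probability reaches 0.5 at val=2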
def detect_version(env, cc): """Return the version of the GNU compiler, or None if it is not a GNU compiler.""" cc = env.subst(cc) if not cc: return None version = None #pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['-dumpversion'], pipe = SCons.Action._subproc(env, SCons.Util.CLVar(cc) + ['--version'], stdin = 'devnull', stderr = 'devnull', stdout = subprocess.PIPE) # -dumpversion was added in GCC 3.0. As long as we're supporting # GCC versions older than that, we should use --version and a # regular expression. #line = pipe.stdout.read().strip() #if line: # version = line line = SCons.Util.to_str(pipe.stdout.readline()) match = re.search(r'[0-9]+(\.[0-9]+)+', line) if match: version = match.group(0) # Non-GNU compiler's output (like AIX xlc's) may exceed the stdout buffer: # So continue with reading to let the child process actually terminate. while SCons.Util.to_str(pipe.stdout.readline()): pass ret = pipe.wait() if ret != 0: return None return version
Return the version of the GNU compiler, or None if it is not a GNU compiler.
def calculate_lvgd_voltage_current_stats(nw): """ LV Voltage and Current Statistics for an arbitrary network Note ---- Aggregated Load Areas are excluded. Parameters ---------- nw: :any:`list` of NetworkDing0 The MV grid(s) to be studied Returns ------- pandas.DataFrame nodes_df : Dataframe containing voltage, respectively current, statis for every critical node, resp. every critical station, in every LV grid in nw. pandas.DataFrame lines_df : Dataframe containing current statistics for every critical line, in every LV grid in nw. """ ############################## # close circuit breakers nw.control_circuit_breakers(mode='close') ############################## nodes_idx = 0 nodes_dict = {} branches_idx = 0 branches_dict = {} for mv_district in nw.mv_grid_districts(): for LA in mv_district.lv_load_areas(): if not LA.is_aggregated: for lv_district in LA.lv_grid_districts(): # nodes voltage crit_nodes = get_critical_voltage_at_nodes(lv_district.lv_grid) for node in crit_nodes: nodes_idx += 1 nodes_dict[nodes_idx] = { 'MV_grid_id': mv_district.mv_grid.id_db, 'LV_grid_id': lv_district.lv_grid.id_db, 'LA_id': LA.id_db, 'node id': node['node'].__repr__(), 'v_diff_0': node['v_diff'][0], 'v_diff_1': node['v_diff'][1], 's_max_0': 'NA', 's_max_1': 'NA', 'V nominal': lv_district.lv_grid.v_level, } # branches currents critical_branches, critical_stations = get_critical_line_loading(lv_district.lv_grid) for branch in critical_branches: branches_idx += 1 branches_dict[branches_idx] = { 'MV_grid_id': mv_district.mv_grid.id_db, 'LV_grid_id': lv_district.lv_grid.id_db, 'LA_id': LA.id_db, 'branch id': branch['branch'].__repr__(), 's_max_0': branch['s_max'][0], 's_max_1': branch['s_max'][1], } # stations for node in critical_stations: nodes_idx += 1 nodes_dict[nodes_idx] = { 'MV_grid_id': mv_district.mv_grid.id_db, 'LV_grid_id': lv_district.lv_grid.id_db, 'LA_id': LA.id_db, 'node id': node['station'].__repr__(), 's_max_0': node['s_max'][0], 's_max_1': node['s_max'][1], 'v_diff_0': 'NA', 'v_diff_1': 'NA', } nodes_df = pd.DataFrame.from_dict(nodes_dict, orient='index') branches_df = pd.DataFrame.from_dict(branches_dict, orient='index') if not nodes_df.empty: nodes_df = nodes_df.set_index('node id') nodes_df = nodes_df.fillna(0) nodes_df = nodes_df[sorted(nodes_df.columns.tolist())] nodes_df.sort_index(inplace=True) if not branches_df.empty: branches_df = branches_df.set_index('branch id') branches_df = branches_df.fillna(0) branches_df = branches_df[sorted(branches_df.columns.tolist())] branches_df.sort_index(inplace=True) return nodes_df, branches_df
LV Voltage and Current Statistics for an arbitrary network Note ---- Aggregated Load Areas are excluded. Parameters ---------- nw: :any:`list` of NetworkDing0 The MV grid(s) to be studied Returns ------- pandas.DataFrame nodes_df : Dataframe containing voltage, respectively current, statistics for every critical node, resp. every critical station, in every LV grid in nw. pandas.DataFrame lines_df : Dataframe containing current statistics for every critical line, in every LV grid in nw.
def jco_from_pestpp_runstorage(rnj_filename,pst_filename): """ read pars and obs from a pest++ serialized run storage file (e.g., .rnj) and return pyemu.Jco. This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco file in a subsequent step to avoid memory resource issues associated with very large problems. Parameters ---------- rnj_filename : str the name of the run storage file pst_filename : str the name of the pst file Returns ------- jco_cols : pyemu.Jco TODO ---- Check rnj file contains transformed par vals (i.e., in model input space) Currently only returns pyemu.Jco; doesn't write jco file due to memory issues associated with very large problems Compare rnj and jco from Freyberg problem in autotests """ header_dtype = np.dtype([("n_runs",np.int64),("run_size",np.int64),("p_name_size",np.int64), ("o_name_size",np.int64)]) pst = pyemu.Pst(pst_filename) par = pst.parameter_data log_pars = set(par.loc[par.partrans=="log","parnme"].values) with open(rnj_filename,'rb') as f: header = np.fromfile(f,dtype=header_dtype,count=1) try: base_par,base_obs = read_pestpp_runstorage(rnj_filename,irun=0) except: raise Exception("couldn't get base run...") par = par.loc[base_par.index,:] li = base_par.index.map(lambda x: par.loc[x,"partrans"]=="log") base_par.loc[li] = base_par.loc[li].apply(np.log10) jco_cols = {} for irun in range(1,int(header["n_runs"])): par_df,obs_df = read_pestpp_runstorage(rnj_filename,irun=irun) par_df.loc[li] = par_df.loc[li].apply(np.log10) obs_diff = base_obs - obs_df par_diff = base_par - par_df # check only one non-zero element per col(par) if len(par_diff[par_diff.parval1 != 0]) > 1: raise Exception("more than one par diff - looks like the file wasn't created during jco filling...") parnme = par_diff[par_diff.parval1 != 0].index[0] parval = par_diff.parval1.loc[parnme] # derivatives jco_col = obs_diff / parval # some tracking, checks print("processing par {0}: {1}...".format(irun, parnme)) print("%nzsens: {0}%...".format((jco_col[abs(jco_col.obsval)>1e-8].shape[0] / jco_col.shape[0])*100.)) jco_cols[parnme] = jco_col.obsval jco_cols = pd.DataFrame.from_records(data=jco_cols, index=list(obs_diff.index.values)) jco_cols = pyemu.Jco.from_dataframe(jco_cols) # write # memory considerations important here for very large matrices - break into chunks... #jco_fnam = "{0}".format(filename[:-4]+".jco") #jco_cols.to_binary(filename=jco_fnam, droptol=None, chunk=None) return jco_cols
read pars and obs from a pest++ serialized run storage file (e.g., .rnj) and return pyemu.Jco. This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco file in a subsequent step to avoid memory resource issues associated with very large problems. Parameters ---------- rnj_filename : str the name of the run storage file pst_filename : str the name of the pst file Returns ------- jco_cols : pyemu.Jco TODO ---- Check rnj file contains transformed par vals (i.e., in model input space) Currently only returns pyemu.Jco; doesn't write jco file due to memory issues associated with very large problems Compare rnj and jco from Freyberg problem in autotests
def find_file_regex(root_dir, re_expression, return_abs_path=True, search_sub_directories=True):
    '''
    Finds all the files within the specified root directory whose names match
    the regex expression.

    Args:
        root_dir: The root directory.
        re_expression: The regex expression.
        return_abs_path: If set to true, returns the full paths of the files,
            else returns the names of the files.
        search_sub_directories: If set to true, searches sub-directories recursively.
    '''
    compiled = re.compile(re_expression)
    result = []
    for dirpath, dirnames, files in os.walk(root_dir):
        # Select files matching the expression
        for file in files:
            if compiled.match(file):
                result.append(os.path.join(dirpath, file) if return_abs_path else file)
        # Break if no sub-directories
        if not search_sub_directories:
            break
    return result
Finds all the files within the specified root directory whose names match the regex expression. Args: root_dir: The root directory. re_expression: The regex expression. return_abs_path: If set to true, returns the full paths of the files, else returns the names of the files. search_sub_directories: If set to true, searches sub-directories recursively.
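For example, assuming the function is in scope, collecting every Python file under the current directory might look like:

py_files = find_file_regex('.', r'.*\.py$')   # paths joined with their directory, searched recursively
test_names = find_file_regex('.', r'test_.*', return_abs_path=False,
                             search_sub_directories=False)
print(py_files[:5], test_names)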
def regression():
    """
    Run regression testing - lint and then run all tests.
    """
    # HACK: Start using hitchbuildpy to get around this.
    Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
    storybook = _storybook({}).only_uninherited()
    #storybook.with_params(**{"python version": "2.7.10"})\
    #.ordered_by_name().play()
    Command("touch", DIR.project.joinpath("pathquery", "__init__.py").abspath()).run()
    storybook.with_params(**{"python version": "3.5.0"}).ordered_by_name().play()
    lint()
Run regression testing - lint and then run all tests.
def check_dataset(dataset):
    """Confirm shape (3 colors x rows x cols) and values [0 to 255] are OK."""
    if isinstance(dataset, numpy.ndarray) and not len(dataset.shape) == 4:
        check_dataset_shape(dataset)
        check_dataset_range(dataset)
    else:  # must be a list of arrays or a 4D NumPy array
        for i, d in enumerate(dataset):
            if not isinstance(d, numpy.ndarray):
                raise ValueError(
                    'Requires a NumPy array (rgb x rows x cols) '
                    'with integer values in the range [0, 255].'
                )
            try:
                check_dataset_shape(d)
                check_dataset_range(d)
            except ValueError as err:
                raise ValueError(
                    '{}\nAt position {} in the list of arrays.'
                    .format(err, i)
                )
Confirm shape (3 colors x rows x cols) and values [0 to 255] are OK.
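A small sketch of valid input, assuming check_dataset and its helper checks are in scope: a single (3, rows, cols) uint8 array is validated directly, and a list of such arrays is checked element by element.

import numpy

frame = numpy.zeros((3, 32, 32), dtype=numpy.uint8)   # 3 colors x rows x cols
check_dataset(frame)                                   # single array: checked directly
check_dataset([frame, frame])                          # list of arrays: each one is checked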
def get(self, sid):
    """
    Constructs a FaxMediaContext

    :param sid: The unique string that identifies the resource to fetch

    :returns: twilio.rest.fax.v1.fax.fax_media.FaxMediaContext
    :rtype: twilio.rest.fax.v1.fax.fax_media.FaxMediaContext
    """
    return FaxMediaContext(self._version, fax_sid=self._solution['fax_sid'], sid=sid, )
Constructs a FaxMediaContext :param sid: The unique string that identifies the resource to fetch :returns: twilio.rest.fax.v1.fax.fax_media.FaxMediaContext :rtype: twilio.rest.fax.v1.fax.fax_media.FaxMediaContext
def create_bv_bitmap(dot_product_vector: str, dot_product_bias: str) -> Dict[str, str]:
    """
    This function creates a map from bitstring to function value for a boolean formula :math:`f`
    with a dot product vector :math:`a` and a dot product bias :math:`b`

    .. math::

       f:\\{0,1\\}^n\\rightarrow \\{0,1\\}

       \\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2}

       (\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\})

    :param dot_product_vector: a string of 0's and 1's that represents the dot-product
        partner in :math:`f`
    :param dot_product_bias: 0 or 1 as a string representing the bias term in :math:`f`
    :return: A dictionary containing all possible bitstring of length equal to :math:`a` and the
        function value :math:`f`
    """
    n_bits = len(dot_product_vector)
    bit_map = {}
    for bit_val in range(2 ** n_bits):
        bit_map[np.binary_repr(bit_val, width=n_bits)] = str(
            (int(utils.bitwise_dot_product(np.binary_repr(bit_val, width=n_bits),
                                           dot_product_vector))
             + int(dot_product_bias, 2)) % 2
        )
    return bit_map
This function creates a map from bitstring to function value for a boolean formula :math:`f` with a dot product vector :math:`a` and a dot product bias :math:`b` .. math:: f:\\{0,1\\}^n\\rightarrow \\{0,1\\} \\mathbf{x}\\rightarrow \\mathbf{a}\\cdot\\mathbf{x}+b\\pmod{2} (\\mathbf{a}\\in\\{0,1\\}^n, b\\in\\{0,1\\}) :param dot_product_vector: a string of 0's and 1's that represents the dot-product partner in :math:`f` :param dot_product_bias: 0 or 1 as a string representing the bias term in :math:`f` :return: A dictionary containing all possible bitstring of length equal to :math:`a` and the function value :math:`f`
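As an illustration, for a = '101' and b = '0' the map enumerates all 3-bit inputs and stores f(x) = a·x + b (mod 2). A sketch, assuming the function and its utils helper are importable:

bitmap = create_bv_bitmap('101', '0')
# f(x) = x0*1 + x1*0 + x2*1 (mod 2), so for example:
print(bitmap['000'])   # '0'
print(bitmap['001'])   # '1'
print(bitmap['101'])   # '0'  (1 + 1 = 0 mod 2)
print(len(bitmap))     # 8 entries, one per 3-bit string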
def _node_add_with_peer_list(self, child_self, child_other): '''_node_add_with_peer_list Low-level api: Apply delta child_other to child_self when child_self is the peer of child_other. Element child_self and child_other are list nodes. Element child_self will be modified during the process. RFC6020 section 7.8.6 is a reference of this method. Parameters ---------- child_self : `Element` A child of a config node in a config tree. child_other : `Element` A child of a config node in another config tree. child_self is the peer of child_other. Returns ------- None There is no return of this method. ''' parent_self = child_self.getparent() s_node = self.device.get_schema_node(child_self) if child_other.get(operation_tag) != 'delete' and \ child_other.get(operation_tag) != 'remove' and \ s_node.get('ordered-by') == 'user' and \ child_other.get(insert_tag) is not None: if child_other.get(insert_tag) == 'first': scope = parent_self.getchildren() siblings = self._get_sequence(scope, child_other.tag, parent_self) if siblings[0] != child_self: siblings[0].addprevious(child_self) elif child_other.get(insert_tag) == 'last': scope = parent_self.getchildren() siblings = self._get_sequence(scope, child_other.tag, parent_self) if siblings[-1] != child_self: siblings[-1].addnext(child_self) elif child_other.get(insert_tag) == 'before': if child_other.get(key_tag) is None: _inserterror('before', self.device.get_xpath(child_other), 'key') sibling = parent_self.find(child_other.tag + child_other.get(key_tag), namespaces=child_other.nsmap) if sibling is None: path = self.device.get_xpath(child_other) key = child_other.get(key_tag) _inserterror('before', path, 'key', key) if sibling != child_self: sibling.addprevious(child_self) elif child_other.get(insert_tag) == 'after': if child_other.get(key_tag) is None: _inserterror('after', self.device.get_xpath(child_other), 'key') sibling = parent_self.find(child_other.tag + child_other.get(key_tag), namespaces=child_other.nsmap) if sibling is None: path = self.device.get_xpath(child_other) key = child_other.get(key_tag) _inserterror('after', path, 'key', key) if sibling != child_self: sibling.addnext(child_self) if child_other.get(operation_tag) is None or \ child_other.get(operation_tag) == 'merge': self.node_add(child_self, child_other) elif child_other.get(operation_tag) == 'replace': e = deepcopy(child_other) parent_self.replace(child_self, self._del_attrib(e)) elif child_other.get(operation_tag) == 'create': raise ConfigDeltaError('data-exists: try to create node {} but ' \ 'it already exists' \ .format(self.device.get_xpath(child_other))) elif child_other.get(operation_tag) == 'delete' or \ child_other.get(operation_tag) == 'remove': parent_self.remove(child_self) else: raise ConfigDeltaError("unknown operation: node {} contains " \ "operation '{}'" \ .format(self.device.get_xpath(child_other), child_other.get(operation_tag)))
_node_add_with_peer_list Low-level api: Apply delta child_other to child_self when child_self is the peer of child_other. Element child_self and child_other are list nodes. Element child_self will be modified during the process. RFC6020 section 7.8.6 is a reference of this method. Parameters ---------- child_self : `Element` A child of a config node in a config tree. child_other : `Element` A child of a config node in another config tree. child_self is the peer of child_other. Returns ------- None There is no return of this method.
def evaluate_scpd_xml(url): """ Get and evaluate SCPD XML to identified URLs. Returns dictionary with keys "host", "modelName", "friendlyName" and "presentationURL" if a Denon AVR device was found and "False" if not. """ # Get SCPD XML via HTTP GET try: res = requests.get(url, timeout=2) except requests.exceptions.RequestException as err: _LOGGER.error( "When trying to request %s the following error occurred: %s", url, err) raise ConnectionError if res.status_code == 200: try: root = ET.fromstring(res.text) # Look for manufacturer "Denon" in response. # Using "try" in case tags are not available in XML _LOGGER.debug("Device %s has manufacturer %s", url, root.find(SCPD_DEVICE).find(SCPD_MANUFACTURER).text) if (root.find(SCPD_DEVICE).find( SCPD_MANUFACTURER).text in SUPPORTED_MANUFACTURERS and root.find(SCPD_DEVICE).find( SCPD_DEVICETYPE).text == DEVICETYPE_DENON): device = {} device["host"] = urlparse( root.find(SCPD_DEVICE).find( SCPD_PRESENTATIONURL).text).hostname device["presentationURL"] = ( root.find(SCPD_DEVICE).find(SCPD_PRESENTATIONURL).text) device["modelName"] = ( root.find(SCPD_DEVICE).find(SCPD_MODELNAME).text) device["friendlyName"] = ( root.find(SCPD_DEVICE).find(SCPD_FRIENDLYNAME).text) return device else: return False except (AttributeError, ValueError, ET.ParseError) as err: _LOGGER.error( "Error occurred during evaluation of SCPD XML: %s", err) return False else: _LOGGER.error("Host returned HTTP status %s when connecting to %s", res.status_code, url) raise ConnectionError
Get and evaluate SCPD XML from identified URLs. Returns a dictionary with keys "host", "modelName", "friendlyName" and "presentationURL" if a Denon AVR device was found, and False if not.
def delete_rich_menu(self, rich_menu_id, timeout=None):
    """Call delete rich menu API.

    https://developers.line.me/en/docs/messaging-api/reference/#delete-rich-menu

    :param str rich_menu_id: ID of an uploaded rich menu
    :param timeout: (optional) How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    """
    self._delete(
        '/v2/bot/richmenu/{rich_menu_id}'.format(rich_menu_id=rich_menu_id),
        timeout=timeout
    )
Call delete rich menu API. https://developers.line.me/en/docs/messaging-api/reference/#delete-rich-menu :param str rich_menu_id: ID of an uploaded rich menu :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float)
def deactivate(self):
    """
    Deactivates the Component.

    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Deactivating '{0}' Component.".format(self.__class__.__name__))

    self.__engine = None
    self.__settings = None
    self.__settings_section = None

    self.__script_editor = None

    self.activated = False
    return True
Deactivates the Component. :return: Method success. :rtype: bool
def adduser(username, uid=None, system=False, no_login=True, no_password=False, group=False, gecos=None, **kwargs): """ Formats an ``adduser`` command. :param username: User name. :type username: unicode | str :param uid: Optional user id to use. :type uid: long | int :param system: Create a system user account. :type system: bool :param no_login: Disable the login for this user. Not compatible with CentOS. Implies setting '--no-create-home', and ``no_password``. :type no_login: bool :param no_password: Disable the password for this user. Not compatible with CentOS. :type no_password: bool :param group: Create a group along with the user. Not compatible with CentOS. :type group: bool :param gecos: Set GECOS information in order to suppress an interactive prompt. On CentOS, use ``__comment`` instead. :type gecos: unicode | str :param kwargs: Additional keyword arguments which are converted to the command line. :return: A formatted ``adduser`` command with arguments. :rtype: unicode | str """ return _format_cmd('adduser', username, __system=bool(system), __uid=uid, __group=bool(group), __gid=uid, no_login=(no_login, _NO_CREATE_HOME, _NO_LOGIN), __disabled_password=no_login or bool(no_password), __gecos=gecos, **kwargs)
Formats an ``adduser`` command. :param username: User name. :type username: unicode | str :param uid: Optional user id to use. :type uid: long | int :param system: Create a system user account. :type system: bool :param no_login: Disable the login for this user. Not compatible with CentOS. Implies setting '--no-create-home', and ``no_password``. :type no_login: bool :param no_password: Disable the password for this user. Not compatible with CentOS. :type no_password: bool :param group: Create a group along with the user. Not compatible with CentOS. :type group: bool :param gecos: Set GECOS information in order to suppress an interactive prompt. On CentOS, use ``__comment`` instead. :type gecos: unicode | str :param kwargs: Additional keyword arguments which are converted to the command line. :return: A formatted ``adduser`` command with arguments. :rtype: unicode | str
def transaction(self):
    """
    Sets up a context where all the statements within it are run within a
    single database transaction. For internal use only.
    """
    # The idea here is to fake the nesting of transactions. Only when
    # we've gotten back to the topmost transaction context do we actually
    # commit or rollback.
    with self.mdr:
        try:
            self._depth += 1
            yield self
            self._depth -= 1
        except self.mdr.OperationalError:
            # We've lost the connection, so there's no sense in
            # attempting to roll back the transaction.
            self._depth -= 1
            raise
        except:
            self._depth -= 1
            if self._depth == 0:
                self.mdr.rollback()
            raise
        if self._depth == 0:
            self.mdr.commit()
Sets up a context where all the statements within it are run within a single database transaction. For internal use only.
def slides(self):
    """
    |Slides| object containing the slides in this presentation.
    """
    sldIdLst = self._element.get_or_add_sldIdLst()
    self.part.rename_slide_parts([sldId.rId for sldId in sldIdLst])
    return Slides(sldIdLst, self)
|Slides| object containing the slides in this presentation.
def job_conf(self, job_id):
    """
    A job configuration resource contains information about the job
    configuration for this job.

    :param str job_id: The job id
    :returns: API response object with JSON data
    :rtype: :py:class:`yarn_api_client.base.Response`
    """
    path = '/ws/v1/history/mapreduce/jobs/{jobid}/conf'.format(jobid=job_id)
    return self.request(path)
A job configuration resource contains information about the job configuration for this job. :param str job_id: The job id :returns: API response object with JSON data :rtype: :py:class:`yarn_api_client.base.Response`
def get_agg_data(cls, obj, category=None): """ Reduces any Overlay or NdOverlay of Elements into a single xarray Dataset that can be aggregated. """ paths = [] if isinstance(obj, Graph): obj = obj.edgepaths kdims = list(obj.kdims) vdims = list(obj.vdims) dims = obj.dimensions()[:2] if isinstance(obj, Path): glyph = 'line' for p in obj.split(datatype='dataframe'): paths.append(p) elif isinstance(obj, CompositeOverlay): element = None for key, el in obj.data.items(): x, y, element, glyph = cls.get_agg_data(el) dims = (x, y) df = PandasInterface.as_dframe(element) if isinstance(obj, NdOverlay): df = df.assign(**dict(zip(obj.dimensions('key', True), key))) paths.append(df) if element is None: dims = None else: kdims += element.kdims vdims = element.vdims elif isinstance(obj, Element): glyph = 'line' if isinstance(obj, Curve) else 'points' paths.append(PandasInterface.as_dframe(obj)) if dims is None or len(dims) != 2: return None, None, None, None else: x, y = dims if len(paths) > 1: if glyph == 'line': path = paths[0][:1] if isinstance(path, dd.DataFrame): path = path.compute() empty = path.copy() empty.iloc[0, :] = (np.NaN,) * empty.shape[1] paths = [elem for p in paths for elem in (p, empty)][:-1] if all(isinstance(path, dd.DataFrame) for path in paths): df = dd.concat(paths) else: paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths] df = pd.concat(paths) else: df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name]) if category and df[category].dtype.name != 'category': df[category] = df[category].astype('category') is_dask = isinstance(df, dd.DataFrame) if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or df[d.name].dtype.kind == 'M' for d in (x, y)): df = df.copy() for d in (x, y): vals = df[d.name] if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types): vals = cftime_to_timestamp(vals, 'ns') elif df[d.name].dtype.kind == 'M': vals = vals.astype('datetime64[ns]') else: continue df[d.name] = vals.astype('int64') return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph
Reduces any Overlay or NdOverlay of Elements into a single xarray Dataset that can be aggregated.
def list_spiders(self, project):
    """
    Lists all known spiders for a specific project. First class, maps to
    Scrapyd's list spiders endpoint.
    """
    url = self._build_url(constants.LIST_SPIDERS_ENDPOINT)
    params = {'project': project}
    json = self.client.get(url, params=params, timeout=self.timeout)
    return json['spiders']
Lists all known spiders for a specific project. First class, maps to Scrapyd's list spiders endpoint.
def retry(func, exception_type, quit_event):
    """
    Run the function, retrying when the specified exception_type occurs.

    Poll quit_event on each iteration, to be responsive to an external
    exit request.
    """
    while True:
        if quit_event.is_set():
            raise StopIteration
        try:
            return func()
        except exception_type:
            pass
Run the function, retrying when the specified exception_type occurs. Poll quit_event on each iteration, to be responsive to an external exit request.
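A small self-contained sketch of how retry might be driven, using a threading.Event as the quit flag and a deliberately flaky callable:

import threading

quit_event = threading.Event()
attempts = {'n': 0}

def flaky():
    # Fails twice with ValueError, then succeeds.
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ValueError('not yet')
    return 'ok'

print(retry(flaky, ValueError, quit_event))   # 'ok' after two retries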
def eigh(a, eigvec=True, rcond=None): """ Eigenvalues and eigenvectors of symmetric matrix ``a``. Args: a: Two-dimensional, square Hermitian matrix/array of numbers and/or :class:`gvar.GVar`\s. Array elements must be real-valued if `gvar.GVar`\s are involved (i.e., symmetric matrix). eigvec (bool): If ``True`` (default), method returns a tuple of arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues of ``a`` (in ascending order), and ``vec[:, i]`` are the corresponding eigenvectors of ``a``. Only ``val`` is returned if ``eigvec=False``. rcond (float): Eigenvalues whose difference is smaller than ``rcond`` times their sum are assumed to be degenerate (and ignored) when computing variances for the eigvectors. Default (``rcond=None``) is ``max(M,N)`` times machine precision. Returns: Tuple ``(val,vec)`` of eigenvalues and eigenvectors of matrix ``a`` if parameter ``eigvec==True`` (default). The eigenvalues ``val[i]`` are in ascending order and ``vec[:, i]`` are the corresponding eigenvalues. Only the eigenvalues ``val`` are returned if ``eigvec=False``. Raises: ValueError: If matrix is not square and two-dimensional. """ a = numpy.asarray(a) if a.dtype != object: val, vec = numpy.linalg.eigh(a) return (val, vec) if eigvec else val amean = gvar.mean(a) if amean.ndim != 2 or amean.shape[0] != amean.shape[1]: raise ValueError('bad matrix shape: ' + str(a.shape)) if rcond is None: rcond = numpy.finfo(float).eps * max(a.shape) da = a - amean val0, vec0 = numpy.linalg.eigh(amean) val = val0 + [ vec0[:, i].conjugate().dot(da.dot(vec0[:, i])) for i in range(vec0.shape[1]) ] if eigvec == True: if vec0.dtype == complex: raise ValueError('cannot evaluate eigenvectors when a is complex') vec = numpy.array(vec0, dtype=object) for i in range(len(val)): for j in range(len(val)): dval = val0[i] - val0[j] if abs(dval) < rcond * abs(val0[j] + val0[i]) or dval == 0.0: continue vec[:, i] += vec0[:, j] * ( vec0[:, j].dot(da.dot(vec0[:, i])) / dval ) return val, vec else: return val
Eigenvalues and eigenvectors of symmetric matrix ``a``. Args: a: Two-dimensional, square Hermitian matrix/array of numbers and/or :class:`gvar.GVar`\s. Array elements must be real-valued if `gvar.GVar`\s are involved (i.e., symmetric matrix). eigvec (bool): If ``True`` (default), method returns a tuple of arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues of ``a`` (in ascending order), and ``vec[:, i]`` are the corresponding eigenvectors of ``a``. Only ``val`` is returned if ``eigvec=False``. rcond (float): Eigenvalues whose difference is smaller than ``rcond`` times their sum are assumed to be degenerate (and ignored) when computing variances for the eigenvectors. Default (``rcond=None``) is ``max(M,N)`` times machine precision. Returns: Tuple ``(val,vec)`` of eigenvalues and eigenvectors of matrix ``a`` if parameter ``eigvec==True`` (default). The eigenvalues ``val[i]`` are in ascending order and ``vec[:, i]`` are the corresponding eigenvectors. Only the eigenvalues ``val`` are returned if ``eigvec=False``. Raises: ValueError: If matrix is not square and two-dimensional.
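When ``a`` contains no gvar.GVars the call simply falls through to numpy.linalg.eigh; a minimal numeric sketch, assuming eigh above is in scope:

import numpy as np

a = np.array([[2.0, 1.0],
              [1.0, 2.0]])
val, vec = eigh(a)
print(val)            # [1. 3.] - eigenvalues in ascending order
print(a @ vec[:, 0])  # equals val[0] * vec[:, 0] up to rounding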
def store_records_for_package(self, entry_point, records):
    """
    Store the records in a way that permits lookup by package.
    """
    # If provided records already exist in the module mapping list, it
    # likely means that a package declared multiple keys for the same
    # package namespace; while normally this does not happen, this
    # default implementation makes no assumptions as to whether or not
    # this is permitted.
    pkg_module_records = self._dist_to_package_module_map(entry_point)
    pkg_module_records.extend(records)
Store the records in a way that permits lookup by package
def get_params(self, deep=False):
    """
    Returns a dict of all of the object's user-facing parameters

    Parameters
    ----------
    deep : boolean, default: False
        when True, also gets non-user-facing parameters

    Returns
    -------
    dict
    """
    attrs = self.__dict__
    for attr in self._include:
        attrs[attr] = getattr(self, attr)
    if deep is True:
        return attrs
    return dict([(k, v) for k, v in list(attrs.items())
                 if (k[0] != '_')
                 and (k[-1] != '_')
                 and (k not in self._exclude)])
Returns a dict of all of the object's user-facing parameters Parameters ---------- deep : boolean, default: False when True, also gets non-user-facing parameters Returns ------- dict
def fetch_existing_token_of_user(self, client_id, grant_type, user_id):
    """
    Retrieve an access token issued to a client and user for a specific
    grant.

    :param client_id: The identifier of a client as a `str`.
    :param grant_type: The type of grant.
    :param user_id: The identifier of the user the access token has been
        issued to.
    :return: An instance of :class:`oauth2.datatype.AccessToken`.
    :raises: :class:`oauth2.error.AccessTokenNotFound` if no access token
        could be retrieved.
    """
    token_data = self.fetchone(self.fetch_existing_token_of_user_query,
                               client_id, grant_type, user_id)
    if token_data is None:
        raise AccessTokenNotFound
    scopes = self._fetch_scopes(access_token_id=token_data[0])
    data = self._fetch_data(access_token_id=token_data[0])
    return self._row_to_token(data=data, scopes=scopes, row=token_data)
Retrieve an access token issued to a client and user for a specific grant. :param client_id: The identifier of a client as a `str`. :param grant_type: The type of grant. :param user_id: The identifier of the user the access token has been issued to. :return: An instance of :class:`oauth2.datatype.AccessToken`. :raises: :class:`oauth2.error.AccessTokenNotFound` if no access token could be retrieved.
def parse_uniprot_txt_file(infile): """Parse a raw UniProt metadata file and return a dictionary. Args: infile: Path to metadata file Returns: dict: Metadata dictionary """ uniprot_metadata_dict = {} metadata = old_parse_uniprot_txt_file(infile) metadata_keys = list(metadata.keys()) if metadata_keys: metadata_key = metadata_keys[0] else: return uniprot_metadata_dict uniprot_metadata_dict['seq_len'] = len(str(metadata[metadata_key]['sequence'])) uniprot_metadata_dict['reviewed'] = metadata[metadata_key]['is_reviewed'] uniprot_metadata_dict['seq_version'] = metadata[metadata_key]['sequence_version'] uniprot_metadata_dict['entry_version'] = metadata[metadata_key]['entry_version'] if 'gene' in metadata[metadata_key]: uniprot_metadata_dict['gene_name'] = metadata[metadata_key]['gene'] if 'description' in metadata[metadata_key]: uniprot_metadata_dict['description'] = metadata[metadata_key]['description'] if 'refseq' in metadata[metadata_key]: uniprot_metadata_dict['refseq'] = metadata[metadata_key]['refseq'] if 'kegg' in metadata[metadata_key]: uniprot_metadata_dict['kegg'] = metadata[metadata_key]['kegg'] if 'ec' in metadata[metadata_key]: uniprot_metadata_dict['ec_number'] = metadata[metadata_key]['ec'] if 'pfam' in metadata[metadata_key]: uniprot_metadata_dict['pfam'] = metadata[metadata_key]['pfam'] if 'pdbs' in metadata[metadata_key]: uniprot_metadata_dict['pdbs'] = list(set(metadata[metadata_key]['pdbs'])) return uniprot_metadata_dict
Parse a raw UniProt metadata file and return a dictionary. Args: infile: Path to metadata file Returns: dict: Metadata dictionary
def splitEkmDate(dateint):
    """Break out a date from Omnimeter read.

    Note a corrupt date will raise an exception when you convert it to int
    to hand to this method.

    Args:
        dateint (int): Omnimeter datetime as int.

    Returns:
        tuple: Named tuple which breaks out as follows:

            ========== =====================
            yy         Last 2 digits of year
            mm         Month 1-12
            dd         Day 1-31
            weekday    Zero based weekday
            hh         Hour 0-23
            minutes    Minutes 0-59
            ss         Seconds 0-59
            ========== =====================

    """
    date_str = str(dateint)
    dt = namedtuple('EkmDate', ['yy', 'mm', 'dd', 'weekday', 'hh', 'minutes', 'ss'])
    if len(date_str) != 14:
        dt.yy = dt.mm = dt.dd = dt.weekday = dt.hh = dt.minutes = dt.ss = 0
        return dt
    dt.yy = int(date_str[0:2])
    dt.mm = int(date_str[2:4])
    dt.dd = int(date_str[4:6])
    dt.weekday = int(date_str[6:8])
    dt.hh = int(date_str[8:10])
    dt.minutes = int(date_str[10:12])
    dt.ss = int(date_str[12:14])
    return dt
Break out a date from Omnimeter read. Note a corrupt date will raise an exception when you convert it to int to hand to this method. Args: dateint (int): Omnimeter datetime as int. Returns: tuple: Named tuple which breaks out as follows: ========== ===================== yy Last 2 digits of year mm Month 1-12 dd Day 1-31 weekday Zero based weekday hh Hour 0-23 minutes Minutes 0-59 ss Seconds 0-59 ========== =====================
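For instance, assuming splitEkmDate (and collections.namedtuple) is in scope, a 14-digit Omnimeter datetime splits like this:

dt = splitEkmDate(16013105123045)   # 2016-01-31, weekday 5, 12:30:45
print(dt.yy, dt.mm, dt.dd)          # 16 1 31
print(dt.hh, dt.minutes, dt.ss)     # 12 30 45
print(splitEkmDate(123).yy)         # 0 - wrong length, all fields zeroed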
def rsa_base64_decrypt(self, cipher, b64=True):
    """
    Base64-decode the ciphertext first, then RSA-decrypt the data.
    """
    with open(self.key_file) as fp:
        key_ = RSA.importKey(fp.read())
        _cip = PKCS1_v1_5.new(key_)
        cipher = base64.b64decode(cipher) if b64 else cipher
        plain = _cip.decrypt(cipher, Random.new().read(15 + SHA.digest_size))
        return helper.to_str(plain)
Base64-decode the ciphertext first, then RSA-decrypt the data.
def clearAdvancedActions(self):
    """
    Clears out the advanced action map.
    """
    self._advancedMap.clear()

    margins = list(self.getContentsMargins())
    margins[2] = 0
    self.setContentsMargins(*margins)
Clears out the advanced action map.
def select_if(df, fun):
    """Selects columns where fun(ction) is true
    Args:
        fun: a function that will be applied to columns
    """
    def _filter_f(col):
        try:
            return fun(df[col])
        except:
            return False

    cols = list(filter(_filter_f, df.columns))
    return df[cols]
Selects columns where fun(ction) is true Args: fun: a function that will be applied to columns
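A short sketch with pandas, keeping only the numeric columns of a small frame (assuming select_if is in scope):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y'], 'c': [0.5, 1.5]})
numeric = select_if(df, lambda col: pd.api.types.is_numeric_dtype(col))
print(list(numeric.columns))   # ['a', 'c']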
def forecast(stl, fc_func, steps=10, seasonal=False, **fc_func_kwargs): """Forecast the given decomposition ``stl`` forward by ``steps`` steps using the forecasting function ``fc_func``, optionally including the calculated seasonality. This is an additive model, Y[t] = T[t] + S[t] + e[t] Args: stl (a modified statsmodels.tsa.seasonal.DecomposeResult): STL decomposition of observed time series created using the ``stldecompose.decompose()`` method. fc_func (function): Function which takes an array of observations and returns a single valued forecast for the next point. steps (int, optional): Number of forward steps to include in the forecast seasonal (bool, optional): Include seasonal component in forecast fc_func_kwargs: keyword arguments All remaining arguments are passed to the forecasting function ``fc_func`` Returns: forecast_frame (pd.Dataframe): A ``pandas.Dataframe`` containing forecast values and a DatetimeIndex matching the observed index. """ # container for forecast values forecast_array = np.array([]) # forecast trend # unpack precalculated trend array stl frame trend_array = stl.trend # iteratively forecast trend ("seasonally adjusted") component # note: this loop can be slow for step in range(steps): # make this prediction on all available data pred = fc_func(np.append(trend_array, forecast_array), **fc_func_kwargs) # add this prediction to current array forecast_array = np.append(forecast_array, pred) col_name = fc_func.__name__ # forecast start and index are determined by observed data observed_timedelta = stl.observed.index[-1] - stl.observed.index[-2] forecast_idx_start = stl.observed.index[-1] + observed_timedelta forecast_idx = pd.date_range(start=forecast_idx_start, periods=steps, freq=pd.tseries.frequencies.to_offset(observed_timedelta)) # (optionally) forecast seasonal & combine if seasonal: # track index and value of max correlation seasonal_ix = 0 max_correlation = -np.inf # loop over indexes=length of period avgs detrended_array = np.asanyarray(stl.observed - stl.trend).squeeze() for i, x in enumerate(stl.period_averages): # work slices backward from end of detrended observations if i == 0: # slicing w/ [x:-0] doesn't work detrended_slice = detrended_array[-len(stl.period_averages):] else: detrended_slice = detrended_array[-(len(stl.period_averages) + i):-i] # calculate corr b/w period_avgs and detrend_slice this_correlation = np.correlate(detrended_slice, stl.period_averages)[0] if this_correlation > max_correlation: # update ix and max correlation max_correlation = this_correlation seasonal_ix = i # roll seasonal signal to matching phase rolled_period_averages = np.roll(stl.period_averages, -seasonal_ix) # tile as many time as needed to reach "steps", then truncate tiled_averages = np.tile(rolled_period_averages, (steps // len(stl.period_averages) + 1))[:steps] # add seasonal values to previous forecast forecast_array += tiled_averages col_name += '+seasonal' # combine data array with index into named dataframe forecast_frame = pd.DataFrame(data=forecast_array, index=forecast_idx) forecast_frame.columns = [col_name] return forecast_frame
Forecast the given decomposition ``stl`` forward by ``steps`` steps using the forecasting function ``fc_func``, optionally including the calculated seasonality. This is an additive model, Y[t] = T[t] + S[t] + e[t] Args: stl (a modified statsmodels.tsa.seasonal.DecomposeResult): STL decomposition of observed time series created using the ``stldecompose.decompose()`` method. fc_func (function): Function which takes an array of observations and returns a single valued forecast for the next point. steps (int, optional): Number of forward steps to include in the forecast seasonal (bool, optional): Include seasonal component in forecast fc_func_kwargs: keyword arguments All remaining arguments are passed to the forecasting function ``fc_func`` Returns: forecast_frame (pd.Dataframe): A ``pandas.Dataframe`` containing forecast values and a DatetimeIndex matching the observed index.
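A rough usage sketch under the assumption that the decomposition comes from stldecompose.decompose() as the docstring describes; observed_df is a placeholder for a DatetimeIndex'd series, and np.mean works as fc_func because it maps an array of observations to a single forecast value:

import numpy as np
from stldecompose import decompose, forecast

decomp = decompose(observed_df, period=12)        # observed_df: placeholder observed series
fcast = forecast(decomp, steps=12, fc_func=np.mean, seasonal=True)
print(fcast.head())                               # single column named 'mean+seasonal'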
def all(self):
    """
    :return: array of file objects (200 status code)
    """
    url = "{url_base}/resource/{pid}/files/".format(url_base=self.hs.url_base,
                                                    pid=self.pid)
    r = self.hs._request('GET', url)
    return r
:return: array of file objects (200 status code)
def make_jira_blueprint( base_url, consumer_key=None, rsa_key=None, redirect_url=None, redirect_to=None, login_url=None, authorized_url=None, session_class=None, storage=None, ): """ Make a blueprint for authenticating with JIRA using OAuth 1. This requires a consumer key and RSA key for the JIRA application link. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`JIRA_OAUTH_CONSUMER_KEY` and :envvar:`JIRA_OAUTH_RSA_KEY`. Args: base_url (str): The base URL of your JIRA installation. For example, for Atlassian's hosted Cloud JIRA, the base_url would be ``https://jira.atlassian.com`` consumer_key (str): The consumer key for your Application Link on JIRA rsa_key (str or path): The RSA private key for your Application Link on JIRA. This can be the contents of the key as a string, or a path to the key file on disk. redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/jira`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/jira/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.contrib.jira.JsonOAuth1Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. :rtype: :class:`~flask_dance.consumer.OAuth1ConsumerBlueprint` :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app. """ if rsa_key and os.path.isfile(rsa_key): with open(rsa_key) as f: rsa_key = f.read() base_url = URLObject(base_url) jira_bp = OAuth1ConsumerBlueprint( "jira", __name__, client_key=consumer_key, rsa_key=rsa_key, signature_method=SIGNATURE_RSA, base_url=base_url, request_token_url=base_url.relative("plugins/servlet/oauth/request-token"), access_token_url=base_url.relative("plugins/servlet/oauth/access-token"), authorization_url=base_url.relative("plugins/servlet/oauth/authorize"), redirect_url=redirect_url, redirect_to=redirect_to, login_url=login_url, authorized_url=authorized_url, session_class=session_class or JsonOAuth1Session, storage=storage, ) jira_bp.from_config["client_key"] = "JIRA_OAUTH_CONSUMER_KEY" jira_bp.from_config["rsa_key"] = "JIRA_OAUTH_RSA_KEY" @jira_bp.before_app_request def set_applocal_session(): ctx = stack.top ctx.jira_oauth = jira_bp.session return jira_bp
Make a blueprint for authenticating with JIRA using OAuth 1. This requires a consumer key and RSA key for the JIRA application link. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`JIRA_OAUTH_CONSUMER_KEY` and :envvar:`JIRA_OAUTH_RSA_KEY`. Args: base_url (str): The base URL of your JIRA installation. For example, for Atlassian's hosted Cloud JIRA, the base_url would be ``https://jira.atlassian.com`` consumer_key (str): The consumer key for your Application Link on JIRA rsa_key (str or path): The RSA private key for your Application Link on JIRA. This can be the contents of the key as a string, or a path to the key file on disk. redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/jira`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/jira/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.contrib.jira.JsonOAuth1Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. :rtype: :class:`~flask_dance.consumer.OAuth1ConsumerBlueprint` :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
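A minimal wiring sketch for the blueprint above, assuming flask-dance is installed; the base URL, consumer key, and RSA key path are placeholders, and in practice the credentials usually come from the app config via JIRA_OAUTH_CONSUMER_KEY / JIRA_OAUTH_RSA_KEY.

from flask import Flask
from flask_dance.contrib.jira import make_jira_blueprint

app = Flask(__name__)
jira_bp = make_jira_blueprint(
    base_url="https://jira.example.com",   # placeholder JIRA installation
    consumer_key="my-consumer-key",        # placeholder Application Link key
    rsa_key="/path/to/jira.pem",           # placeholder path to the private key
)
app.register_blueprint(jira_bp, url_prefix="/login")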
def deletePartials(self):
    """ Delete any old partial uploads/downloads in path. """
    if self.dryrun:
        self._client.listPartials()
    else:
        self._client.deletePartials()
Delete any old partial uploads/downloads in path.
def items(iterable):
    """
    Iterates over the items of a sequence. If the sequence supports the
    dictionary protocol (iteritems/items) then we use that. Otherwise
    we use the enumerate built-in function.
    """
    if hasattr(iterable, 'iteritems'):
        return (p for p in iterable.iteritems())
    elif hasattr(iterable, 'items'):
        return (p for p in iterable.items())
    else:
        return (p for p in enumerate(iterable))
Iterates over the items of a sequence. If the sequence supports the dictionary protocol (iteritems/items) then we use that. Otherwise we use the enumerate built-in function.
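A quick sketch of both branches, assuming the `items` helper above is in scope:

list(items({'a': 1, 'b': 2}))   # [('a', 1), ('b', 2)] via the dictionary protocol
list(items(['x', 'y']))         # [(0, 'x'), (1, 'y')] via enumerate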
def get_part_name(self, undefined=""):
    """
    Args:
        undefined (optional): Argument, which will be returned if the
                  `part_name` record is not found.

    Returns:
        str: Name of the part of the series, or `undefined` if `part_name`
             is not found.
    """
    return _undefined_pattern(
        "".join(self.get_subfields("245", "n")),
        lambda x: x.strip() == "",
        undefined
    )
Args: undefined (optional): Argument, which will be returned if the `part_name` record is not found. Returns: str: Name of the part of the series, or `undefined` if `part_name` is not found.
def register_phonon_task(self, *args, **kwargs):
    """Register a phonon task."""
    kwargs["task_class"] = PhononTask
    return self.register_task(*args, **kwargs)
Register a phonon task.
def get_default_saver(max_to_keep: int=3) -> tf.train.Saver:
    """
    Creates Tensorflow Saver object with 3 recent checkpoints to keep.

    :param max_to_keep: Maximum number of recent checkpoints to keep, defaults to 3
    """
    return tf.train.Saver(max_to_keep=max_to_keep)
Creates Tensorflow Saver object with 3 recent checkpoints to keep. :param max_to_keep: Maximum number of recent checkpoints to keep, defaults to 3
def generate_stimfunction(onsets, event_durations, total_time, weights=[1], timing_file=None, temporal_resolution=100.0, ): """Return the function for the timecourse events When do stimuli onset, how long for and to what extent should you resolve the fMRI time course. There are two ways to create this, either by supplying onset, duration and weight information or by supplying a timing file (in the three column format used by FSL). Parameters ---------- onsets : list, int What are the timestamps (in s) for when an event you want to generate onsets? event_durations : list, int What are the durations (in s) of the events you want to generate? If there is only one value then this will be assigned to all onsets total_time : int How long (in s) is the experiment in total. weights : list, float What is the weight for each event (how high is the box car)? If there is only one value then this will be assigned to all onsets timing_file : string The filename (with path) to a three column timing file (FSL) to make the events. Still requires total_time to work temporal_resolution : float How many elements per second are you modeling for the timecourse. This is useful when you want to model the HRF at an arbitrarily high resolution (and then downsample to your TR later). Returns ---------- stim_function : 1 by timepoint array, float The time course of stimulus evoked activation. This has a temporal resolution of temporal resolution / 1.0 elements per second """ # If the timing file is supplied then use this to acquire the if timing_file is not None: # Read in text file line by line with open(timing_file) as f: text = f.readlines() # Pull out file as a an array # Preset onsets = list() event_durations = list() weights = list() # Pull out the onsets, weights and durations, set as a float for line in text: onset, duration, weight = line.strip().split() # Check if the onset is more precise than the temporal resolution upsampled_onset = float(onset) * temporal_resolution # Because of float precision, the upsampled values might # not round as expected . # E.g. float('1.001') * 1000 = 1000.99 if np.allclose(upsampled_onset, np.round(upsampled_onset)) == 0: warning = 'Your onset: ' + str(onset) + ' has more decimal ' \ 'points than the ' \ 'specified temporal ' \ 'resolution can ' \ 'resolve. This means' \ ' that events might' \ ' be missed. ' \ 'Consider increasing' \ ' the temporal ' \ 'resolution.' logger.warning(warning) onsets.append(float(onset)) event_durations.append(float(duration)) weights.append(float(weight)) # If only one duration is supplied then duplicate it for the length of # the onset variable if len(event_durations) == 1: event_durations = event_durations * len(onsets) if len(weights) == 1: weights = weights * len(onsets) # Check files if np.max(onsets) > total_time: raise ValueError('Onsets outside of range of total time.') # Generate the time course as empty, each element is a millisecond by # default stimfunction = np.zeros((int(round(total_time * temporal_resolution)), 1)) # Cycle through the onsets for onset_counter in list(range(len(onsets))): # Adjust for the resolution onset_idx = int(np.floor(onsets[onset_counter] * temporal_resolution)) # Adjust for the resolution offset_idx = int(np.floor((onsets[onset_counter] + event_durations[ onset_counter]) * temporal_resolution)) # Store the weights stimfunction[onset_idx:offset_idx, 0] = [weights[onset_counter]] return stimfunction
Return the function for the timecourse events When do stimuli onset, how long for and to what extent should you resolve the fMRI time course. There are two ways to create this, either by supplying onset, duration and weight information or by supplying a timing file (in the three column format used by FSL). Parameters ---------- onsets : list, int What are the timestamps (in s) for when an event you want to generate onsets? event_durations : list, int What are the durations (in s) of the events you want to generate? If there is only one value then this will be assigned to all onsets total_time : int How long (in s) is the experiment in total. weights : list, float What is the weight for each event (how high is the box car)? If there is only one value then this will be assigned to all onsets timing_file : string The filename (with path) to a three column timing file (FSL) to make the events. Still requires total_time to work temporal_resolution : float How many elements per second are you modeling for the timecourse. This is useful when you want to model the HRF at an arbitrarily high resolution (and then downsample to your TR later). Returns ---------- stim_function : 1 by timepoint array, float The time course of stimulus evoked activation. This has a temporal resolution of temporal resolution / 1.0 elements per second
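A sketch of the onsets/durations calling convention, assuming the function above is importable; the numbers are arbitrary:

stimfunc = generate_stimfunction(
    onsets=[10, 30, 50],       # three events, in seconds
    event_durations=[5],       # 5 s each; a single value is reused for all onsets
    total_time=100,
)
stimfunc.shape                 # (10000, 1): 100 s at the default 100 samples per second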
def to_text(self, tree, force_root=False): """ Extract text from tags. Skip any selectors specified and include attributes if specified. Ignored tags will not have their attributes scanned either. """ self.extract_tag_metadata(tree) text = [] attributes = [] comments = [] blocks = [] if not (self.ignores.match(tree) if self.ignores else None): # The root of the document is the BeautifulSoup object capture = self.captures.match(tree) if self.captures is not None else None # Check attributes for normal tags if capture: for attr in self.attributes: value = tree.attrs.get(attr, '').strip() if value: sel = self.construct_selector(tree, attr=attr) attributes.append((value, sel)) # Walk children for child in tree.children: string = str(child).strip() is_comment = isinstance(child, bs4.Comment) if isinstance(child, bs4.element.Tag): t, b, a, c = self.to_text(child) text.extend(t) attributes.extend(a) comments.extend(c) blocks.extend(b) # Get content if not the root and not a comment (unless we want comments). elif not isinstance(child, NON_CONTENT) and (not is_comment or self.comments): string = str(child).strip() if string: if is_comment: sel = self.construct_selector(tree) + '<!--comment-->' comments.append((string, sel)) elif capture: text.append(string) text.append(' ') elif self.comments: for child in tree.descendants: if isinstance(child, bs4.Comment): string = str(child).strip() if string: sel = self.construct_selector(tree) + '<!--comment-->' comments.append((string, sel)) text = self.store_blocks(tree, blocks, text, force_root) if tree.parent is None or force_root: return blocks, attributes, comments else: return text, blocks, attributes, comments
Extract text from tags. Skip any selectors specified and include attributes if specified. Ignored tags will not have their attributes scanned either.
def find_analyses(ar_or_sample):
    """ This function is used to find keywords that are not on the analysis
        but keywords that are on the interim fields.

        This function and the is_keyword function should probably be in
        resultsimport.py or somewhere central where they can be used by
        other instrument interfaces.
    """
    bc = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
    ar = bc(portal_type='AnalysisRequest', id=ar_or_sample)
    if len(ar) == 0:
        ar = bc(portal_type='AnalysisRequest', getClientSampleID=ar_or_sample)
    if len(ar) == 1:
        obj = ar[0].getObject()
        analyses = obj.getAnalyses(full_objects=True)
        return analyses
    return []
This function is used to find keywords that are not on the analysis but keywords that are on the interim fields. This function and the is_keyword function should probably be in resultsimport.py or somewhere central where they can be used by other instrument interfaces.
def match_similar(base, items):
    """Get the most similar matching item from a list of items.

    @param base: base item to locate best match
    @param items: list of items for comparison

    @return: most similar matching item or None
    """
    finds = list(find_similar(base, items))
    if finds:
        return max(finds, key=base.similarity)  # TODO: make O(n)
    return None
Get the most similar matching item from a list of items. @param base: base item to locate best match @param items: list of items for comparison @return: most similar matching item or None
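Since `find_similar` and the item type are defined elsewhere, only a hedged usage sketch is possible; `local_album` and `remote_albums` are hypothetical objects whose class provides the `similarity` method that `max(..., key=base.similarity)` relies on:

best = match_similar(local_album, remote_albums)   # hypothetical objects with a similarity() method
if best is not None:
    print(best)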
def dcounts(self):
    """
    :return: a data frame with names and distinct counts and fractions for all columns in the database
    """
    print("WARNING: Distinct value count for all tables can take a long time...", file=sys.stderr)
    sys.stderr.flush()

    data = []
    for t in self.tables():
        for c in t.columns():
            data.append([t.name(), c.name(), c.dcount(), t.size(), c.dcount() / float(t.size())])
    df = pd.DataFrame(data, columns=["table", "column", "distinct", "size", "fraction"])
    return df
:return: a data frame with names and distinct counts and fractions for all columns in the database
def select_unrectified_slitlet(image2d, islitlet, csu_bar_slit_center, params, parmodel, maskonly): """Returns image with the indicated slitlet (zero anywhere else). Parameters ---------- image2d : numpy array Initial image from which the slitlet data will be extracted. islitlet : int Slitlet number. csu_bar_slit_center : float CSU bar slit center. params : :class:`~lmfit.parameter.Parameters` Parameters to be employed in the prediction of the distorted boundaries. parmodel : str Model to be assumed. Allowed values are 'longslit' and 'multislit'. maskonly : bool If True, returns simply a mask (1 in the slitlet region and zero anywhere else. Returns ------- image2d_output : numpy array 2D image with the pixel information corresponding to the selected slitlet and zero everywhere else. """ # protection if image2d.shape != (EMIR_NAXIS2, EMIR_NAXIS1): raise ValueError("NAXIS1, NAXIS2 unexpected for EMIR detector") # initialize image output image2d_output = np.zeros_like(image2d) # expected slitlet frontiers list_expected_frontiers = expected_distorted_frontiers( islitlet, csu_bar_slit_center, params, parmodel, numpts=101, deg=5, debugplot=0 ) pol_lower_expected = list_expected_frontiers[0].poly_funct pol_upper_expected = list_expected_frontiers[1].poly_funct # main loop: compute for each channel the minimum and maximum scan for j in range(EMIR_NAXIS1): xchannel = j + 1 y0_lower = pol_lower_expected(xchannel) y0_upper = pol_upper_expected(xchannel) n1, n2 = nscan_minmax_frontiers(y0_frontier_lower=y0_lower, y0_frontier_upper=y0_upper, resize=True) # note that n1 and n2 are scans (ranging from 1 to NAXIS2) if maskonly: image2d_output[(n1 - 1):n2, j] = np.repeat( [1.0], (n2 - n1 + 1) ) else: image2d_output[(n1 - 1):n2, j] = image2d[(n1 - 1):n2, j] return image2d_output
Returns image with the indicated slitlet (zero anywhere else). Parameters ---------- image2d : numpy array Initial image from which the slitlet data will be extracted. islitlet : int Slitlet number. csu_bar_slit_center : float CSU bar slit center. params : :class:`~lmfit.parameter.Parameters` Parameters to be employed in the prediction of the distorted boundaries. parmodel : str Model to be assumed. Allowed values are 'longslit' and 'multislit'. maskonly : bool If True, returns simply a mask (1 in the slitlet region and zero anywhere else). Returns ------- image2d_output : numpy array 2D image with the pixel information corresponding to the selected slitlet and zero everywhere else.
def CreateJarBuilder(env): """The Jar builder expects a list of class files which it can package into a jar file. The jar tool provides an interface for passing other types of java files such as .java, directories or swig interfaces and will build them to class files in which it can package into the jar. """ try: java_jar = env['BUILDERS']['JarFile'] except KeyError: fs = SCons.Node.FS.get_default_fs() jar_com = SCons.Action.Action('$JARCOM', '$JARCOMSTR') java_jar = SCons.Builder.Builder(action = jar_com, suffix = '$JARSUFFIX', src_suffix = '$JAVACLASSSUFFIX', src_builder = 'JavaClassFile', source_factory = fs.Entry) env['BUILDERS']['JarFile'] = java_jar return java_jar
The Jar builder expects a list of class files which it can package into a jar file. The jar tool provides an interface for passing other types of java files such as .java files, directories, or SWIG interfaces, and will build them into class files which it can then package into the jar.
def _init_data_map(self): """ OVERRIDDEN: Initialize required FGDC data map with XPATHS and specialized functions """ if self._data_map is not None: return # Initiation happens once # Parse and validate the ArcGIS metadata root if self._xml_tree is None: agis_root = ARCGIS_ROOTS[0] # Default to uncapitalized else: agis_root = get_element_name(self._xml_tree) if agis_root not in ARCGIS_ROOTS: raise InvalidContent('Invalid XML root for ArcGIS metadata: {root}', root=agis_root) agis_data_map = {'_root': agis_root} agis_data_map.update(_agis_tag_formats) agis_data_structures = {} # Capture and format complex XPATHs ad_format = agis_data_map[ATTRIBUTES] agis_data_structures[ATTRIBUTES] = format_xpaths( _agis_definitions[ATTRIBUTES], label=ad_format.format(ad_path='attrlabl'), aliases=ad_format.format(ad_path='attalias'), definition=ad_format.format(ad_path='attrdef'), definition_src=ad_format.format(ad_path='attrdefs') ) bb_format = agis_data_map[BOUNDING_BOX] agis_data_structures[BOUNDING_BOX] = format_xpaths( _agis_definitions[BOUNDING_BOX], east=bb_format.format(bbox_path='eastBL'), south=bb_format.format(bbox_path='southBL'), west=bb_format.format(bbox_path='westBL'), north=bb_format.format(bbox_path='northBL') ) ct_format = agis_data_map[CONTACTS] agis_data_structures[CONTACTS] = format_xpaths( _agis_definitions[CONTACTS], name=ct_format.format(ct_path='rpIndName'), organization=ct_format.format(ct_path='rpOrgName'), position=ct_format.format(ct_path='rpPosName'), email=ct_format.format(ct_path='rpCntInfo/cntAddress/eMailAdd') ) dt_format = agis_data_map[DATES] agis_data_structures[DATES] = { DATE_TYPE_MULTIPLE: dt_format.format(type_path='TM_Instant/tmPosition'), '_' + DATE_TYPE_MULTIPLE: dt_format.format(type_path='TM_Instant/tmPosition/@date'), DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path='TM_Period/tmBegin'), '_' + DATE_TYPE_RANGE_BEGIN: dt_format.format(type_path='TM_Period/tmBegin/@date'), DATE_TYPE_RANGE_END: dt_format.format(type_path='TM_Period/tmEnd'), '_' + DATE_TYPE_RANGE_END: dt_format.format(type_path='TM_Period/tmEnd/@date'), # Same as multiple dates, but will contain only one DATE_TYPE_SINGLE: dt_format.format(type_path='TM_Instant/tmPosition'), '_' + DATE_TYPE_SINGLE: dt_format.format(type_path='TM_Instant/tmPosition/@date') } agis_data_structures[DATES][DATE_TYPE_RANGE] = [ agis_data_structures[DATES][DATE_TYPE_RANGE_BEGIN], agis_data_structures[DATES][DATE_TYPE_RANGE_END] ] agis_data_structures[DATES]['_' + DATE_TYPE_RANGE] = [ agis_data_structures[DATES]['_' + DATE_TYPE_RANGE_BEGIN], agis_data_structures[DATES]['_' + DATE_TYPE_RANGE_END] ] df_format = agis_data_map[DIGITAL_FORMS] agis_data_structures[DIGITAL_FORMS] = format_xpaths( _agis_definitions[DIGITAL_FORMS], name=df_format.format(df_path='formatName'), content=df_format.format(df_path='formatInfo'), decompression=df_format.format(df_path='fileDecmTech'), version=df_format.format(df_path='formatVer'), specification=df_format.format(df_path='formatSpec'), access_desc=agis_data_map['_access_desc'], access_instrs=agis_data_map['_access_instrs'], network_resource=agis_data_map['_network_resource'] ) lw_format = agis_data_map[LARGER_WORKS] agis_data_structures[LARGER_WORKS] = format_xpaths( _agis_definitions[LARGER_WORKS], title=lw_format.format(lw_path='resTitle'), edition=lw_format.format(lw_path='resEd'), origin=lw_format.format(lw_path='citRespParty/rpIndName'), online_linkage=lw_format.format(lw_path='citRespParty/rpCntInfo/cntOnlineRes/linkage'), other_citation=lw_format.format(lw_path='otherCitDet'), 
date=lw_format.format(lw_path='date/pubDate'), place=lw_format.format(lw_path='citRespParty/rpCntInfo/cntAddress/city'), info=lw_format.format(lw_path='citRespParty/rpOrgName') ) ps_format = agis_data_map[PROCESS_STEPS] agis_data_structures[PROCESS_STEPS] = format_xpaths( _agis_definitions[PROCESS_STEPS], description=ps_format.format(ps_path='stepDesc'), date=ps_format.format(ps_path='stepDateTm'), sources=ps_format.format(ps_path='stepSrc/srcDesc') ) ri_format = agis_data_map[RASTER_INFO] agis_data_structures[RASTER_INFO] = format_xpaths( _agis_definitions[RASTER_DIMS], type=ri_format.format(ri_path='@type'), size=ri_format.format(ri_path='dimSize'), value=ri_format.format(ri_path='dimResol/value'), units=ri_format.format(ri_path='dimResol/value/@uom') ) # Assign XPATHS and gis_metadata.utils.ParserProperties to data map for prop, xpath in iteritems(dict(agis_data_map)): if prop in (ATTRIBUTES, CONTACTS, PROCESS_STEPS): agis_data_map[prop] = ParserProperty(self._parse_complex_list, self._update_complex_list) elif prop in (BOUNDING_BOX, LARGER_WORKS): agis_data_map[prop] = ParserProperty(self._parse_complex, self._update_complex) elif prop in ('attribute_accuracy', 'dataset_completeness'): agis_data_map[prop] = ParserProperty(self._parse_report_item, self._update_report_item) elif prop == DATES: agis_data_map[prop] = ParserProperty(self._parse_dates, self._update_dates) elif prop == DIGITAL_FORMS: agis_data_map[prop] = ParserProperty(self._parse_digital_forms, self._update_digital_forms) elif prop == RASTER_INFO: agis_data_map[prop] = ParserProperty(self._parse_raster_info, self._update_raster_info) else: agis_data_map[prop] = xpath self._data_map = agis_data_map self._data_structures = agis_data_structures
OVERRIDDEN: Initialize required FGDC data map with XPATHS and specialized functions
def printBoundingBox(self):
    '''Print the bounding box that this DEM covers'''
    print ("Bounding Latitude: ")
    print (self.startlatitude)
    print (self.endlatitude)
    print ("Bounding Longitude: ")
    print (self.startlongitude)
    print (self.endlongitude)
Print the bounding box that this DEM covers
def Send(self, url, opname, pyobj, nsdict={}, soapaction=None, chain=None, **kw): """Returns a ProcessingChain which needs to be passed to Receive if Send is being called consecutively. """ url = url or self.url cookies = None if chain is not None: cookies = chain.flow.cookies d = {} d.update(self.nsdict) d.update(nsdict) if soapaction is not None: self.addHTTPHeader('SOAPAction', soapaction) chain = self.factory.newInstance() soapdata = chain.processRequest(pyobj, nsdict=nsdict, soapaction=soapaction, **kw) if self.trace: print >>self.trace, "_" * 33, time.ctime(time.time()), "REQUEST:" print >>self.trace, soapdata f = getPage(str(url), contextFactory=self.contextFactory, postdata=soapdata, agent=self.agent, method='POST', headers=self.getHTTPHeaders(), cookies=cookies) if isinstance(f, Failure): return f chain.flow = f self.chain = chain return chain
Returns a ProcessingChain which needs to be passed to Receive if Send is being called consecutively.
def __delete_action(self, revision):
    """
    Handle a delete action to a particular master id via the revision.

    :param dict revision:
    :return:
    """
    delete_response = yield self.collection.delete(revision.get("master_id"))

    if delete_response.get("n") == 0:
        raise DocumentRevisionDeleteFailed()
Handle a delete action to a particular master id via the revision. :param dict revision: :return:
def columnSchema(self):
    """
    Returns the schema for the image column.

    :return: a :class:`StructType` for image column,
        ``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``.

    .. versionadded:: 2.4.0
    """
    if self._columnSchema is None:
        ctx = SparkContext._active_spark_context
        jschema = ctx._jvm.org.apache.spark.ml.image.ImageSchema.columnSchema()
        self._columnSchema = _parse_datatype_json_string(jschema.json())
    return self._columnSchema
Returns the schema for the image column. :return: a :class:`StructType` for image column, ``struct<origin:string, height:int, width:int, nChannels:int, mode:int, data:binary>``. .. versionadded:: 2.4.0
def update(self, unique_name=values.unset, default_ttl=values.unset, callback_url=values.unset, geo_match_level=values.unset, number_selection_behavior=values.unset, intercept_callback_url=values.unset, out_of_session_callback_url=values.unset, chat_instance_sid=values.unset): """ Update the ServiceInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode default_ttl: Default TTL for a Session, in seconds :param unicode callback_url: The URL we should call when the interaction status changes :param ServiceInstance.GeoMatchLevel geo_match_level: Where a proxy number must be located relative to the participant identifier :param ServiceInstance.NumberSelectionBehavior number_selection_behavior: The preference for Proxy Number selection for the Service instance :param unicode intercept_callback_url: The URL we call on each interaction :param unicode out_of_session_callback_url: The URL we call when an inbound call or SMS action occurs on a closed or non-existent Session :param unicode chat_instance_sid: The SID of the Chat Service Instance :returns: Updated ServiceInstance :rtype: twilio.rest.proxy.v1.service.ServiceInstance """ return self._proxy.update( unique_name=unique_name, default_ttl=default_ttl, callback_url=callback_url, geo_match_level=geo_match_level, number_selection_behavior=number_selection_behavior, intercept_callback_url=intercept_callback_url, out_of_session_callback_url=out_of_session_callback_url, chat_instance_sid=chat_instance_sid, )
Update the ServiceInstance :param unicode unique_name: An application-defined string that uniquely identifies the resource :param unicode default_ttl: Default TTL for a Session, in seconds :param unicode callback_url: The URL we should call when the interaction status changes :param ServiceInstance.GeoMatchLevel geo_match_level: Where a proxy number must be located relative to the participant identifier :param ServiceInstance.NumberSelectionBehavior number_selection_behavior: The preference for Proxy Number selection for the Service instance :param unicode intercept_callback_url: The URL we call on each interaction :param unicode out_of_session_callback_url: The URL we call when an inbound call or SMS action occurs on a closed or non-existent Session :param unicode chat_instance_sid: The SID of the Chat Service Instance :returns: Updated ServiceInstance :rtype: twilio.rest.proxy.v1.service.ServiceInstance
def get_lazystring_encoder(app):
    """Return a JSONEncoder for handling lazy strings from Babel.

    Installed on Flask application by default by :class:`InvenioI18N`.
    """
    from speaklater import _LazyString

    class JSONEncoder(app.json_encoder):
        def default(self, o):
            if isinstance(o, _LazyString):
                return text_type(o)
            return super(JSONEncoder, self).default(o)

    return JSONEncoder
Return a JSONEncoder for handling lazy strings from Babel. Installed on Flask application by default by :class:`InvenioI18N`.
def clear(self, exclude=None):
    """ Remove all elements in the cache. """
    if exclude is None:
        self.cache = {}
    else:
        self.cache = {k: v for k, v in self.cache.items() if k in exclude}
Remove all elements in the cache.
def on_message(self, message): """ Receiving a message from the websocket, parse, and act accordingly. """ msg = tornado.escape.json_decode(message) if msg['type'] == 'config_file': if self.application.verbose: print(msg['data']) self.config = list(yaml.load_all(msg['data'])) if len(self.config) > 1: error = 'Please, provide only one configuration.' if self.application.verbose: logger.error(error) self.write_message({'type': 'error', 'error': error}) return self.config = self.config[0] self.send_log('INFO.' + self.simulation_name, 'Using config: {name}'.format(name=self.config['name'])) if 'visualization_params' in self.config: self.write_message({'type': 'visualization_params', 'data': self.config['visualization_params']}) self.name = self.config['name'] self.run_simulation() settings = [] for key in self.config['environment_params']: if type(self.config['environment_params'][key]) == float or type(self.config['environment_params'][key]) == int: if self.config['environment_params'][key] <= 1: setting_type = 'number' else: setting_type = 'great_number' elif type(self.config['environment_params'][key]) == bool: setting_type = 'boolean' else: setting_type = 'undefined' settings.append({ 'label': key, 'type': setting_type, 'value': self.config['environment_params'][key] }) self.write_message({'type': 'settings', 'data': settings}) elif msg['type'] == 'get_trial': if self.application.verbose: logger.info('Trial {} requested!'.format(msg['data'])) self.send_log('INFO.' + __name__, 'Trial {} requested!'.format(msg['data'])) self.write_message({'type': 'get_trial', 'data': self.get_trial(int(msg['data']))}) elif msg['type'] == 'run_simulation': if self.application.verbose: logger.info('Running new simulation for {name}'.format(name=self.config['name'])) self.send_log('INFO.' + self.simulation_name, 'Running new simulation for {name}'.format(name=self.config['name'])) self.config['environment_params'] = msg['data'] self.run_simulation() elif msg['type'] == 'download_gexf': G = self.trials[ int(msg['data']) ].history_to_graph() for node in G.nodes(): if 'pos' in G.node[node]: G.node[node]['viz'] = {"position": {"x": G.node[node]['pos'][0], "y": G.node[node]['pos'][1], "z": 0.0}} del (G.node[node]['pos']) writer = nx.readwrite.gexf.GEXFWriter(version='1.2draft') writer.add_graph(G) self.write_message({'type': 'download_gexf', 'filename': self.config['name'] + '_trial_' + str(msg['data']), 'data': tostring(writer.xml).decode(writer.encoding) }) elif msg['type'] == 'download_json': G = self.trials[ int(msg['data']) ].history_to_graph() for node in G.nodes(): if 'pos' in G.node[node]: G.node[node]['viz'] = {"position": {"x": G.node[node]['pos'][0], "y": G.node[node]['pos'][1], "z": 0.0}} del (G.node[node]['pos']) self.write_message({'type': 'download_json', 'filename': self.config['name'] + '_trial_' + str(msg['data']), 'data': nx.node_link_data(G) }) else: if self.application.verbose: logger.info('Unexpected message!')
Receive a message from the websocket, parse it, and act accordingly.
def cleanup_dead_jobs():
    """
    This cleans up jobs that have been marked as running, but are not queued
    in celery. It is meant to clean up jobs that have been lost due to a
    server crash or some other reason that leaves a job in limbo.
    """
    from .models import WooeyJob

    # Get active tasks from Celery
    inspect = celery_app.control.inspect()
    active_tasks = {task['id'] for worker, tasks in six.iteritems(inspect.active()) for task in tasks}

    # find jobs that are marked as running but not present in celery's active tasks
    active_jobs = WooeyJob.objects.filter(status=WooeyJob.RUNNING)
    to_disable = set()
    for job in active_jobs:
        if job.celery_id not in active_tasks:
            to_disable.add(job.pk)

    WooeyJob.objects.filter(pk__in=to_disable).update(status=WooeyJob.FAILED)
This cleans up jobs that have been marked as running, but are not queued in celery. It is meant to clean up jobs that have been lost due to a server crash or some other reason that leaves a job in limbo.
def partial_fit(self, X, y=None, classes=None, **fit_params): """Fit the module. If the module is initialized, it is not re-initialized, which means that this method should be used if you want to continue training a model (warm start). Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. classes : array, sahpe (n_classes,) Solely for sklearn compatibility, currently unused. **fit_params : dict Additional parameters passed to the ``forward`` method of the module and to the ``self.train_split`` call. """ if not self.initialized_: self.initialize() self.notify('on_train_begin', X=X, y=y) try: self.fit_loop(X, y, **fit_params) except KeyboardInterrupt: pass self.notify('on_train_end', X=X, y=y) return self
Fit the module. If the module is initialized, it is not re-initialized, which means that this method should be used if you want to continue training a model (warm start). Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. y : target data, compatible with skorch.dataset.Dataset The same data types as for ``X`` are supported. If your X is a Dataset that contains the target, ``y`` may be set to None. classes : array, shape (n_classes,) Solely for sklearn compatibility, currently unused. **fit_params : dict Additional parameters passed to the ``forward`` method of the module and to the ``self.train_split`` call.
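A minimal warm-start sketch, assuming skorch is installed; `MyModule` (a torch.nn.Module) and `batches` (a stream of array pairs) are placeholders, not part of the original code:

from skorch import NeuralNetClassifier

net = NeuralNetClassifier(MyModule, max_epochs=1)   # MyModule is a placeholder torch.nn.Module
for X_batch, y_batch in batches:                    # placeholder data stream
    net.partial_fit(X_batch, y_batch)               # keeps training the same initialized module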
def compute_number_edges(function):
    """
        Compute the number of edges of the CFG
    Args:
        function (core.declarations.function.Function)
    Returns:
        int
    """
    n = 0
    for node in function.nodes:
        n += len(node.sons)
    return n
Compute the number of edges of the CFG Args: function (core.declarations.function.Function) Returns: int
def reversed(self):
    """returns a copy of the Arc object with its orientation reversed."""
    return Arc(self.end, self.radius, self.rotation, self.large_arc,
               not self.sweep, self.start)
returns a copy of the Arc object with its orientation reversed.
def resolve_identifier(self, name, expected_type=None): """Resolve an identifier to an object. There is a single namespace for identifiers so the user also should pass an expected type that will be checked against what the identifier actually resolves to so that there are no surprises. Args: name (str): The name that we want to resolve expected_type (type): The type of object that we expect to receive. This is an optional parameter. If None is passed, no type checking is performed. Returns: object: The resolved object """ name = str(name) if name in self._known_identifiers: obj = self._known_identifiers[name] if expected_type is not None and not isinstance(obj, expected_type): raise UnresolvedIdentifierError(u"Identifier resolved to an object of an unexpected type", name=name, expected_type=expected_type.__name__, resolved_type=obj.__class__.__name__) return obj if self.parent is not None: try: return self.parent.resolve_identifier(name) except UnresolvedIdentifierError: pass raise UnresolvedIdentifierError(u"Could not resolve identifier", name=name, scope=self.name)
Resolve an identifier to an object. There is a single namespace for identifiers so the user also should pass an expected type that will be checked against what the identifier actually resolves to so that there are no surprises. Args: name (str): The name that we want to resolve expected_type (type): The type of object that we expect to receive. This is an optional parameter. If None is passed, no type checking is performed. Returns: object: The resolved object
def log_event(self, text, timestamp=None): """Add an arbitrary message to the log file as a global marker. :param str text: The group name of the marker. :param float timestamp: Absolute timestamp in Unix timestamp format. If not given, the marker will be placed along the last message. """ try: # Only works on Windows text = text.encode("mbcs") except LookupError: text = text.encode("ascii") comment = b"Added by python-can" marker = b"python-can" data = GLOBAL_MARKER_STRUCT.pack( 0, 0xFFFFFF, 0xFF3300, 0, len(text), len(marker), len(comment)) self._add_object(GLOBAL_MARKER, data + text + marker + comment, timestamp)
Add an arbitrary message to the log file as a global marker. :param str text: The group name of the marker. :param float timestamp: Absolute timestamp in Unix timestamp format. If not given, the marker will be placed along the last message.
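This method appears to sit on python-can's BLF writer, so a hedged usage sketch might look like the following; the import path and file name are assumptions:

from can.io import BLFWriter   # assumption: log_event lives on python-can's BLFWriter

writer = BLFWriter("drive_test.blf")
writer.log_event("calibration finished")   # global marker placed at the last message's time
writer.stop()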
def dict_dot(d, k, val=None, default=None): """Get or set value using a dot-notation key in a multilevel dict.""" if val is None and k == '': return d def set_default(dict_or_model, key, default_value): """Set default field value.""" if isinstance(dict_or_model, models.Model): if not hasattr(dict_or_model, key): setattr(dict_or_model, key, default_value) return getattr(dict_or_model, key) else: return dict_or_model.setdefault(key, default_value) def get_item(dict_or_model, key): """Get field value.""" if isinstance(dict_or_model, models.Model): return getattr(dict_or_model, key) else: return dict_or_model[key] def set_item(dict_or_model, key, value): """Set field value.""" if isinstance(dict_or_model, models.Model): setattr(dict_or_model, key, value) else: dict_or_model[key] = value if val is None and callable(default): # Get value, default for missing return functools.reduce(lambda a, b: set_default(a, b, default()), k.split('.'), d) elif val is None: # Get value, error on missing return functools.reduce(get_item, k.split('.'), d) else: # Set value try: k, k_last = k.rsplit('.', 1) set_item(dict_dot(d, k, default=dict), k_last, val) except ValueError: set_item(d, k, val) return val
Get or set value using a dot-notation key in a multilevel dict.
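For plain dicts (leaving the Django-model branch aside), the helper above behaves like this, assuming it is in scope:

d = {'a': {'b': 1}}
dict_dot(d, 'a.b')        # 1 (get)
dict_dot(d, 'a.c', 2)     # sets d['a']['c'] = 2 and returns 2
d                         # {'a': {'b': 1, 'c': 2}}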
def q12d_local(vertices, lame, mu): """Local stiffness matrix for two dimensional elasticity on a square element. Parameters ---------- lame : Float Lame's first parameter mu : Float shear modulus See Also -------- linear_elasticity Notes ----- Vertices should be listed in counter-clockwise order:: [3]----[2] | | | | [0]----[1] Degrees of freedom are enumerated as follows:: [x=6,y=7]----[x=4,y=5] | | | | [x=0,y=1]----[x=2,y=3] """ M = lame + 2*mu # P-wave modulus R_11 = np.matrix([[2, -2, -1, 1], [-2, 2, 1, -1], [-1, 1, 2, -2], [1, -1, -2, 2]]) / 6.0 R_12 = np.matrix([[1, 1, -1, -1], [-1, -1, 1, 1], [-1, -1, 1, 1], [1, 1, -1, -1]]) / 4.0 R_22 = np.matrix([[2, 1, -1, -2], [1, 2, -2, -1], [-1, -2, 2, 1], [-2, -1, 1, 2]]) / 6.0 F = inv(np.vstack((vertices[1] - vertices[0], vertices[3] - vertices[0]))) K = np.zeros((8, 8)) # stiffness matrix E = F.T * np.matrix([[M, 0], [0, mu]]) * F K[0::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\ E[1, 0] * R_12.T + E[1, 1] * R_22 E = F.T * np.matrix([[mu, 0], [0, M]]) * F K[1::2, 1::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\ E[1, 0] * R_12.T + E[1, 1] * R_22 E = F.T * np.matrix([[0, mu], [lame, 0]]) * F K[1::2, 0::2] = E[0, 0] * R_11 + E[0, 1] * R_12 +\ E[1, 0] * R_12.T + E[1, 1] * R_22 K[0::2, 1::2] = K[1::2, 0::2].T K /= det(F) return K
Local stiffness matrix for two dimensional elasticity on a square element. Parameters ---------- lame : Float Lame's first parameter mu : Float shear modulus See Also -------- linear_elasticity Notes ----- Vertices should be listed in counter-clockwise order:: [3]----[2] | | | | [0]----[1] Degrees of freedom are enumerated as follows:: [x=6,y=7]----[x=4,y=5] | | | | [x=0,y=1]----[x=2,y=3]
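A sketch of calling the element routine above on the unit square, assuming numpy and the inv/det helpers it uses are imported alongside it:

import numpy as np

vertices = np.array([[0.0, 0.0],
                     [1.0, 0.0],
                     [1.0, 1.0],
                     [0.0, 1.0]])          # counter-clockwise, as required
K = q12d_local(vertices, lame=1.0, mu=1.0)
K.shape                                     # (8, 8): one x/y pair of DOFs per vertex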
def write_calculations_to_csv(funcs, states, columns, path, headers, out_name, metaids=[], extension=".xls"): """Writes each output of the given functions on the given states and data columns to a new column in the specified output file. Note: Column 0 is time. The first data column is column 1. :param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns :type funcs: function or function list :param states: The state ID numbers for which data should be extracted. List should be in order of calculation or if only one state is given then it will be used for all the calculations :type states: string or string list :param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to :type columns: int, string, int list, or string list :param path: Path to your ProCoDA metafile (must be tab-delimited) :type path: string :param headers: List of the desired header for each calculation, in order :type headers: string list :param out_name: Desired name for the output file. Can include a relative path :type out_name: string :param metaids: A list of the experiment IDs you'd like to analyze from the metafile :type metaids: string list, optional :param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in :type extension: string, optional :requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay. :return: out_name.csv (CVS file) - A CSV file with the each column being a new calcuation and each row being a new experiment on which the calcuations were performed :return: output (Pandas.DataFrame)- Pandas DataFrame holding the same data that was written to the output file """ if not isinstance(funcs, list): funcs = [funcs] * len(headers) if not isinstance(states, list): states = [states] * len(headers) if not isinstance(columns, list): columns = [columns] * len(headers) data_agg = [] for i in range(len(headers)): ids, data = read_state_with_metafile(funcs[i], states[i], columns[i], path, metaids, extension) data_agg = np.append(data_agg, [data]) output = pd.DataFrame(data=np.vstack((ids, data_agg)).T, columns=["ID"]+headers) output.to_csv(out_name, sep='\t') return output
Writes each output of the given functions on the given states and data columns to a new column in the specified output file. Note: Column 0 is time. The first data column is column 1. :param funcs: A function or list of functions which will be applied in order to the data. If only one function is given it is applied to all the states/columns :type funcs: function or function list :param states: The state ID numbers for which data should be extracted. List should be in order of calculation or if only one state is given then it will be used for all the calculations :type states: string or string list :param columns: The index of a column, the header of a column, a list of indexes, OR a list of headers of the column(s) that you want to apply calculations to :type columns: int, string, int list, or string list :param path: Path to your ProCoDA metafile (must be tab-delimited) :type path: string :param headers: List of the desired header for each calculation, in order :type headers: string list :param out_name: Desired name for the output file. Can include a relative path :type out_name: string :param metaids: A list of the experiment IDs you'd like to analyze from the metafile :type metaids: string list, optional :param extension: The file extension of the tab delimited file. Defaults to ".xls" if no argument is passed in :type extension: string, optional :requires: funcs, states, columns, and headers are all of the same length if they are lists. Some being lists and some single values are okay. :return: out_name.csv (CSV file) - A CSV file with each column being a new calculation and each row being a new experiment on which the calculations were performed :return: output (Pandas.DataFrame) - Pandas DataFrame holding the same data that was written to the output file
def diagonal_line(xi=None, yi=None, *, ax=None, c=None, ls=None, lw=None, zorder=3): """Plot a diagonal line. Parameters ---------- xi : 1D array-like (optional) The x axis points. If None, taken from axis limits. Default is None. yi : 1D array-like The y axis points. If None, taken from axis limits. Default is None. ax : axis (optional) Axis to plot on. If none is supplied, the current axis is used. c : string (optional) Line color. Default derives from rcParams grid color. ls : string (optional) Line style. Default derives from rcParams linestyle. lw : float (optional) Line width. Default derives from rcParams linewidth. zorder : number (optional) Matplotlib zorder. Default is 3. Returns ------- matplotlib.lines.Line2D object The plotted line. """ if ax is None: ax = plt.gca() # parse xi, yi if xi is None: xi = ax.get_xlim() if yi is None: yi = ax.get_ylim() # parse style if c is None: c = matplotlib.rcParams["grid.color"] if ls is None: ls = matplotlib.rcParams["grid.linestyle"] if lw is None: lw = matplotlib.rcParams["grid.linewidth"] # get axis if ax is None: ax = plt.gca() # make plot diag_min = max(min(xi), min(yi)) diag_max = min(max(xi), max(yi)) line = ax.plot([diag_min, diag_max], [diag_min, diag_max], c=c, ls=ls, lw=lw, zorder=zorder) return line
Plot a diagonal line. Parameters ---------- xi : 1D array-like (optional) The x axis points. If None, taken from axis limits. Default is None. yi : 1D array-like The y axis points. If None, taken from axis limits. Default is None. ax : axis (optional) Axis to plot on. If none is supplied, the current axis is used. c : string (optional) Line color. Default derives from rcParams grid color. ls : string (optional) Line style. Default derives from rcParams linestyle. lw : float (optional) Line width. Default derives from rcParams linewidth. zorder : number (optional) Matplotlib zorder. Default is 3. Returns ------- matplotlib.lines.Line2D object The plotted line.
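A short matplotlib sketch using the helper above, assuming it is importable:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.scatter([0.1, 0.5, 0.9], [0.2, 0.4, 0.95])
diagonal_line(ax=ax)    # y = x guide across the overlapping axis range
plt.show()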
def notify( self, force_notify=None, use_email=None, use_sms=None, email_body_template=None, **kwargs, ): """Notify / send an email and/or SMS. Main entry point. This notification class (me) knows from whom and to whom the notifications will be sent. See signals and kwargs are: * history_instance * instance * user """ email_sent = None sms_sent = None use_email = use_email or getattr(settings, "EMAIL_ENABLED", False) use_sms = use_sms or getattr(settings, "TWILIO_ENABLED", False) if force_notify or self._notify_on_condition(**kwargs): if use_email: email_body_template = ( email_body_template or self.email_body_template ) + self.email_footer_template email_sent = self.send_email( email_body_template=email_body_template, **kwargs ) if use_sms: sms_sent = self.send_sms(**kwargs) self.post_notification_actions( email_sent=email_sent, sms_sent=sms_sent, **kwargs ) return True if email_sent or sms_sent else False
Notify / send an email and/or SMS. Main entry point. This notification class (me) knows from whom and to whom the notifications will be sent. See signals and kwargs are: * history_instance * instance * user
def trace():
    """Enables and disables request tracing."""
    def fget(self):
        return self._options.get('trace', None)

    def fset(self, value):
        self._options['trace'] = value
    return locals()
Enables and disables request tracing.
def parse_acl(acl_string): """ Parse raw string :acl_string: of RAML-defined ACLs. If :acl_string: is blank or None, all permissions are given. Values of ACL action and principal are parsed using `actions` and `special_principals` maps and are looked up after `strip()` and `lower()`. ACEs in :acl_string: may be separated by newlines or semicolons. Action, principal and permission lists must be separated by spaces. Permissions must be comma-separated. E.g. 'allow everyone view,create,update' and 'deny authenticated delete' :param acl_string: Raw RAML string containing defined ACEs. """ if not acl_string: return [ALLOW_ALL] aces_list = acl_string.replace('\n', ';').split(';') aces_list = [ace.strip().split(' ', 2) for ace in aces_list if ace] aces_list = [(a, b, c.split(',')) for a, b, c in aces_list] result_acl = [] for action_str, princ_str, perms in aces_list: # Process action action_str = action_str.strip().lower() action = actions.get(action_str) if action is None: raise ValueError( 'Unknown ACL action: {}. Valid actions: {}'.format( action_str, list(actions.keys()))) # Process principal princ_str = princ_str.strip().lower() if princ_str in special_principals: principal = special_principals[princ_str] elif is_callable_tag(princ_str): principal = resolve_to_callable(princ_str) else: principal = princ_str # Process permissions permissions = parse_permissions(perms) result_acl.append((action, principal, permissions)) return result_acl
Parse raw string :acl_string: of RAML-defined ACLs. If :acl_string: is blank or None, all permissions are given. Values of ACL action and principal are parsed using `actions` and `special_principals` maps and are looked up after `strip()` and `lower()`. ACEs in :acl_string: may be separated by newlines or semicolons. Action, principal and permission lists must be separated by spaces. Permissions must be comma-separated. E.g. 'allow everyone view,create,update' and 'deny authenticated delete' :param acl_string: Raw RAML string containing defined ACEs.
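Using the docstring's own example string, a hedged sketch (the `actions`, `special_principals`, and `ALLOW_ALL` objects live in the same module and are not shown here, so the results are indicative only):

aces = parse_acl('allow everyone view,create,update;deny authenticated delete')
# Each entry is an (action, principal, permissions) tuple, e.g. roughly
# (Allow, Everyone, ['view', 'create', 'update']) for the first ACE.
parse_acl('')   # a blank string yields [ALLOW_ALL]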
def orcid_uri_to_orcid(value):
    "Strip the URI scheme from the start of ORCID URL strings"
    if value is None:
        return value
    replace_values = ['http://orcid.org/', 'https://orcid.org/']
    for replace_value in replace_values:
        value = value.replace(replace_value, '')
    return value
Strip the URI scheme from the start of ORCID URL strings
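The whole helper is shown above, so its behaviour is easy to illustrate:

orcid_uri_to_orcid('https://orcid.org/0000-0002-1825-0097')   # '0000-0002-1825-0097'
orcid_uri_to_orcid(None)                                       # None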
def __iter_read_spectrum_meta(self): """ This method should only be called by __init__. Reads the data formats, coordinates and offsets from the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty. Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or "IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer". """ mz_group = int_group = None slist = None elem_iterator = self.iterparse(self.filename, events=("start", "end")) if sys.version_info > (3,): _, self.root = next(elem_iterator) else: _, self.root = elem_iterator.next() for event, elem in elem_iterator: if elem.tag == self.sl + "spectrumList" and event == "start": slist = elem elif elem.tag == self.sl + "spectrum" and event == "end": self.__process_spectrum(elem) slist.remove(elem) elif elem.tag == self.sl + "referenceableParamGroup" and event == "end": for param in elem: if param.attrib["name"] == "m/z array": self.mzGroupId = elem.attrib['id'] mz_group = elem elif param.attrib["name"] == "intensity array": self.intGroupId = elem.attrib['id'] int_group = elem self.__assign_precision(int_group, mz_group) self.__fix_offsets()
This method should only be called by __init__. Reads the data formats, coordinates and offsets from the .imzML file and initializes the respective attributes. While traversing the XML tree, the per-spectrum metadata is pruned, i.e. the <spectrumList> element(s) are left behind empty. Supported accession values for the number formats: "MS:1000521", "MS:1000523", "IMS:1000141" or "IMS:1000142". The string values are "32-bit float", "64-bit float", "32-bit integer", "64-bit integer".
def delete_eventtype(self, test_type_str=None):
    """Action: create dialog to delete event type."""
    if test_type_str:
        answer = test_type_str, True
    else:
        answer = QInputDialog.getText(self, 'Delete Event Type',
                                      'Enter event\'s name to delete')

    if answer[1]:
        self.annot.remove_event_type(answer[0])
        self.display_eventtype()
        self.update_annotations()
Action: create dialog to delete event type.
def introspect_access_token(self, access_token_value): # type: (str) -> Dict[str, Union[str, List[str]]] """ Returns authorization data associated with the access token. See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>. """ if access_token_value not in self.access_tokens: raise InvalidAccessToken('{} unknown'.format(access_token_value)) authz_info = self.access_tokens[access_token_value] introspection = {'active': authz_info['exp'] >= int(time.time())} introspection_params = {k: v for k, v in authz_info.items() if k in TokenIntrospectionResponse.c_param} introspection.update(introspection_params) return introspection
Returns authorization data associated with the access token. See <a href="https://tools.ietf.org/html/rfc7662">"Token Introspection", Section 2.2</a>.
def _reload(self): """ Gets every registered form's field value.\ If a field name is found in the db, it will load it from there.\ Otherwise, the initial value from the field form is used """ ConfigModel = apps.get_model('djconfig.Config') cache = {} data = dict( ConfigModel.objects .all() .values_list('key', 'value')) # populate cache with initial form values, # then with cleaned database values, # then with raw database file/image paths for form_class in self._registry: empty_form = form_class() cache.update({ name: field.initial for name, field in empty_form.fields.items()}) form = form_class(data={ name: _deserialize(data[name], field) for name, field in empty_form.fields.items() if name in data and not isinstance(field, forms.FileField)}) form.is_valid() cache.update({ name: _unlazify(value) for name, value in form.cleaned_data.items() if name in data}) # files are special because they don't have an initial value # and the POSTED data must contain the file. So, we keep # the stored path as is # TODO: see if serialize/deserialize/unlazify can be used for this instead cache.update({ name: data[name] for name, field in empty_form.fields.items() if name in data and isinstance(field, forms.FileField)}) cache['_updated_at'] = data.get('_updated_at') self._cache = cache
Gets every registered form's field value. If a field name is found in the db, it will load it from there. Otherwise, the initial value from the field form is used
def open(self):
    """Open the subtitle file into an Aeidon project."""
    try:
        self.project.open_main(self.filename)
    except UnicodeDecodeError:
        with open(self.filename, 'rb') as openfile:
            encoding = get_encoding(openfile.read())
        try:
            self.project.open_main(self.filename, encoding)
        except UnicodeDecodeError:
            LOGGER.error("'%s' encountered a fatal encoding error", self.filename)
            sys.exit(1)
        except:  # pylint: disable=W0702
            open_error(self.filename)
    except:  # pylint: disable=W0702
        open_error(self.filename)
Open the subtitle file into an Aeidon project.
def from_gpx(gpx_segment):
    """ Creates a segment from a GPX format.

    No preprocessing is done.

    Arguments:
        gpx_segment (:obj:`gpxpy.GPXTrackSegment`)
    Return:
        :obj:`Segment`
    """
    points = []
    for point in gpx_segment.points:
        points.append(Point.from_gpx(point))
    return Segment(points)
Creates a segment from a GPX format. No preprocessing is done. Arguments: gpx_segment (:obj:`gpxpy.GPXTrackSegment`) Return: :obj:`Segment`
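A hedged sketch of feeding it a segment built with gpxpy; the coordinates are placeholders and the import location of Segment/Point in this library is assumed:

import datetime
import gpxpy.gpx

gpx_seg = gpxpy.gpx.GPXTrackSegment()
gpx_seg.points.append(gpxpy.gpx.GPXTrackPoint(
    38.71, -9.14, time=datetime.datetime(2020, 1, 1, 12, 0, 0)))
gpx_seg.points.append(gpxpy.gpx.GPXTrackPoint(
    38.72, -9.15, time=datetime.datetime(2020, 1, 1, 12, 1, 0)))

segment = Segment.from_gpx(gpx_seg)   # Segment/Point come from this library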
def get_options(self): """Process the command line. """ args = self.parse_options(self.args) if args: self.directory = args[0] if self.develop: self.skiptag = True if not self.develop: self.develop = self.defaults.develop if not self.develop: self.infoflags = self.setuptools.infoflags if not self.formats: self.formats = self.defaults.formats for format in self.formats: if format == 'zip': self.distributions.append(('sdist', ['--formats="zip"'])) elif format == 'gztar': self.distributions.append(('sdist', ['--formats="gztar"'])) elif format == 'egg': self.distributions.append(('bdist', ['--formats="egg"'])) elif format == 'wheel': self.distributions.append(('bdist_wheel', [])) if not self.distributions: self.distributions.append(('sdist', ['--formats="zip"'])) if self.list: self.list_locations() if not self.locations: self.locations.extend(self.locations.get_default_location()) if not (self.skipregister and self.skipupload): if not (self.get_skipregister() and self.get_skipupload()): self.locations.check_empty_locations() self.locations.check_valid_locations() if len(args) > 1: if self.urlparser.is_url(self.directory): self.branch = args[1] elif self.urlparser.is_ssh_url(self.directory): self.branch = args[1] else: err_exit('mkrelease: invalid arguments\n%s' % USAGE) if len(args) > 2: err_exit('mkrelease: too many arguments\n%s' % USAGE)
Process the command line.
def _get_mean(self, vs30, mag, rrup, imt, scale_fac): """ Compute and return mean """ C_HR, C_BC, C_SR, SC = self._extract_coeffs(imt) rrup = self._clip_distances(rrup) f0 = self._compute_f0_factor(rrup) f1 = self._compute_f1_factor(rrup) f2 = self._compute_f2_factor(rrup) pga_bc = self._get_pga_bc( f0, f1, f2, SC, mag, rrup, vs30, scale_fac ) # compute mean values for hard-rock sites (vs30 >= 2000), # and non-hard-rock sites (vs30 < 2000) and add soil amplification # term mean = np.zeros_like(vs30) self._compute_mean(C_HR, f0, f1, f2, SC, mag, rrup, vs30 >= 2000.0, mean, scale_fac) self._compute_mean(C_BC, f0, f1, f2, SC, mag, rrup, vs30 < 2000.0, mean, scale_fac) self._compute_soil_amplification(C_SR, vs30, pga_bc, mean) # convert from base 10 to base e if imt == PGV(): mean = np.log(10 ** mean) else: # convert from cm/s**2 to g mean = np.log((10 ** mean) * 1e-2 / g) return mean
Compute and return mean
def plot_forest(
    data,
    kind="forestplot",
    model_names=None,
    var_names=None,
    combined=False,
    credible_interval=0.94,
    rope=None,
    quartiles=True,
    ess=False,
    r_hat=False,
    colors="cycle",
    textsize=None,
    linewidth=None,
    markersize=None,
    ridgeplot_alpha=None,
    ridgeplot_overlap=2,
    figsize=None,
):
    """Forest plot to compare credible intervals from a number of distributions.

    Generates a forest plot of 100*(credible_interval)% credible intervals from
    a trace or list of traces.

    Parameters
    ----------
    data : obj or list[obj]
        Any object that can be converted to an az.InferenceData object.
        Refer to documentation of az.convert_to_dataset for details.
    kind : str
        Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot".
    model_names : list[str], optional
        List with names for the models in the list of data. Useful when
        plotting more than one dataset.
    var_names : list[str], optional
        List of variables to plot (defaults to None, which results in all
        variables plotted).
    combined : bool
        Flag for combining multiple chains into a single chain. If False
        (default), chains will be plotted separately.
    credible_interval : float, optional
        Credible interval to plot. Defaults to 0.94.
    rope : tuple or dictionary of tuples
        Lower and upper values of the Region Of Practical Equivalence. If a
        list with one interval only is provided, the ROPE will be displayed
        across the y-axis. If more than one interval is provided the length of
        the list should match the number of variables.
    quartiles : bool, optional
        Flag for plotting the interquartile range, in addition to the
        credible_interval intervals. Defaults to True.
    r_hat : bool, optional
        Flag for plotting Split R-hat statistics. Requires 2 or more chains.
        Defaults to False.
    ess : bool, optional
        Flag for plotting the effective sample size. Requires 2 or more
        chains. Defaults to False.
    colors : list or string, optional
        List with valid matplotlib colors, one color per model. Alternatively,
        a string can be passed. If the string is `cycle`, it will
        automatically choose a color per model from matplotlib's cycle. If a
        single color is passed, e.g. 'k', 'C2', 'red', this color will be used
        for all models. Defaults to 'cycle'.
    textsize : float
        Text size scaling factor for labels, titles and lines. If None it will
        be autoscaled based on figsize.
    linewidth : int
        Line width throughout. If None it will be autoscaled based on figsize.
    markersize : int
        Markersize throughout. If None it will be autoscaled based on figsize.
    ridgeplot_alpha : float
        Transparency for ridgeplot fill. If 0, border is colored by model,
        otherwise a black outline is used.
    ridgeplot_overlap : float
        Overlap height for ridgeplots.
    figsize : tuple
        Figure size. If None it will be defined automatically.

    Returns
    -------
    fig : matplotlib Figure
    axes : array of matplotlib axes

    Examples
    --------
    Forestplot

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> non_centered_data = az.load_arviz_data('non_centered_eight')
        >>> fig, axes = az.plot_forest(non_centered_data,
        >>>                            kind='forestplot',
        >>>                            var_names=['theta'],
        >>>                            combined=True,
        >>>                            ridgeplot_overlap=3,
        >>>                            figsize=(9, 7))
        >>> axes[0].set_title('Estimated theta for 8 schools model')

    Ridgeplot

    .. plot::
        :context: close-figs

        >>> fig, axes = az.plot_forest(non_centered_data,
        >>>                            kind='ridgeplot',
        >>>                            var_names=['theta'],
        >>>                            combined=True,
        >>>                            ridgeplot_overlap=3,
        >>>                            colors='white',
        >>>                            figsize=(9, 7))
        >>> axes[0].set_title('Estimated theta for 8 schools model')
    """
    if not isinstance(data, (list, tuple)):
        data = [data]

    datasets = [convert_to_dataset(datum) for datum in reversed(data)]

    var_names = _var_names(var_names, datasets)

    ncols, width_ratios = 1, [3]

    if ess:
        ncols += 1
        width_ratios.append(1)

    if r_hat:
        ncols += 1
        width_ratios.append(1)

    plot_handler = PlotHandler(
        datasets, var_names=var_names, model_names=model_names, combined=combined, colors=colors
    )

    if figsize is None:
        figsize = (min(12, sum(width_ratios) * 2), plot_handler.fig_height())

    (figsize, _, titlesize, xt_labelsize, auto_linewidth, auto_markersize) = _scale_fig_size(
        figsize, textsize, 1.1, 1
    )

    if linewidth is None:
        linewidth = auto_linewidth

    if markersize is None:
        markersize = auto_markersize

    fig, axes = plt.subplots(
        nrows=1,
        ncols=ncols,
        figsize=figsize,
        gridspec_kw={"width_ratios": width_ratios},
        sharey=True,
        constrained_layout=True,
    )

    axes = np.atleast_1d(axes)
    if kind == "forestplot":
        plot_handler.forestplot(
            credible_interval,
            quartiles,
            xt_labelsize,
            titlesize,
            linewidth,
            markersize,
            axes[0],
            rope,
        )
    elif kind == "ridgeplot":
        plot_handler.ridgeplot(ridgeplot_overlap, linewidth, ridgeplot_alpha, axes[0])
    else:
        raise TypeError(
            "Argument 'kind' must be one of 'forestplot' or "
            "'ridgeplot' (you provided {})".format(kind)
        )

    idx = 1
    if ess:
        plot_handler.plot_neff(axes[idx], xt_labelsize, titlesize, markersize)
        idx += 1

    if r_hat:
        plot_handler.plot_rhat(axes[idx], xt_labelsize, titlesize, markersize)
        idx += 1

    for ax in axes:
        ax.grid(False)
        # Remove ticklines on y-axes
        for ticks in ax.yaxis.get_major_ticks():
            ticks.tick1On = False
            ticks.tick2On = False

        for loc, spine in ax.spines.items():
            if loc in ["left", "right"]:
                spine.set_visible(False)

        if len(plot_handler.data) > 1:
            plot_handler.make_bands(ax)

    labels, ticks = plot_handler.labels_and_ticks()
    axes[0].set_yticks(ticks)
    axes[0].set_yticklabels(labels)
    all_plotters = list(plot_handler.plotters.values())
    y_max = plot_handler.y_max() - all_plotters[-1].group_offset
    if kind == "ridgeplot":  # space at the top
        y_max += ridgeplot_overlap
    axes[0].set_ylim(-all_plotters[0].group_offset, y_max)

    return fig, axes
Forest plot to compare credible intervals from a number of distributions.

Generates a forest plot of 100*(credible_interval)% credible intervals from a trace or list of traces.

Parameters
----------
data : obj or list[obj]
    Any object that can be converted to an az.InferenceData object.
    Refer to documentation of az.convert_to_dataset for details.
kind : str
    Choose kind of plot for main axis. Supports "forestplot" or "ridgeplot".
model_names : list[str], optional
    List with names for the models in the list of data. Useful when plotting more than one dataset.
var_names : list[str], optional
    List of variables to plot (defaults to None, which results in all variables plotted).
combined : bool
    Flag for combining multiple chains into a single chain. If False (default), chains will be plotted separately.
credible_interval : float, optional
    Credible interval to plot. Defaults to 0.94.
rope : tuple or dictionary of tuples
    Lower and upper values of the Region Of Practical Equivalence. If a list with one interval only is provided,
    the ROPE will be displayed across the y-axis. If more than one interval is provided the length of the list
    should match the number of variables.
quartiles : bool, optional
    Flag for plotting the interquartile range, in addition to the credible_interval intervals. Defaults to True.
r_hat : bool, optional
    Flag for plotting Split R-hat statistics. Requires 2 or more chains. Defaults to False.
ess : bool, optional
    Flag for plotting the effective sample size. Requires 2 or more chains. Defaults to False.
colors : list or string, optional
    List with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
    If the string is `cycle`, it will automatically choose a color per model from matplotlib's cycle.
    If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used for all models.
    Defaults to 'cycle'.
textsize : float
    Text size scaling factor for labels, titles and lines. If None it will be autoscaled based on figsize.
linewidth : int
    Line width throughout. If None it will be autoscaled based on figsize.
markersize : int
    Markersize throughout. If None it will be autoscaled based on figsize.
ridgeplot_alpha : float
    Transparency for ridgeplot fill. If 0, border is colored by model, otherwise a black outline is used.
ridgeplot_overlap : float
    Overlap height for ridgeplots.
figsize : tuple
    Figure size. If None it will be defined automatically.

Returns
-------
fig : matplotlib Figure
axes : array of matplotlib axes

Examples
--------
Forestplot

.. plot::
    :context: close-figs

    >>> import arviz as az
    >>> non_centered_data = az.load_arviz_data('non_centered_eight')
    >>> fig, axes = az.plot_forest(non_centered_data,
    >>>                            kind='forestplot',
    >>>                            var_names=['theta'],
    >>>                            combined=True,
    >>>                            ridgeplot_overlap=3,
    >>>                            figsize=(9, 7))
    >>> axes[0].set_title('Estimated theta for 8 schools model')

Ridgeplot

.. plot::
    :context: close-figs

    >>> fig, axes = az.plot_forest(non_centered_data,
    >>>                            kind='ridgeplot',
    >>>                            var_names=['theta'],
    >>>                            combined=True,
    >>>                            ridgeplot_overlap=3,
    >>>                            colors='white',
    >>>                            figsize=(9, 7))
    >>> axes[0].set_title('Estimated theta for 8 schools model')
def _timeout_thread(self, remain):
    """Timeout before releasing everything, if nothing was returned"""
    time.sleep(remain)
    if not self._ended:
        self._ended = True
        self._release_all()
Timeout before releasing everything, if nothing was returned
def raise_if(self, exception, message, *args, **kwargs):
    """
    If the given exception has a lower priority than the configured minimum,
    subclasses of this class only warn the user; otherwise the exception is
    raised as usual.
    """
    if issubclass(exception, self.minimum_defect):
        raise exception(*args, **kwargs)

    warn(message, SyntaxWarning, *args, **kwargs)
If the given exception has a lower priority than the configured minimum, subclasses of this class only warn the user; otherwise the exception is raised as usual.
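To make the priority check concrete, here is a small self-contained sketch. The holder class, exception names and minimum level are all hypothetical; only the `raise_if` body is taken from the source.

import warnings
from warnings import warn


class MinorDefect(Exception):
    """Hypothetical low-priority problem (below the minimum, so it only warns)."""


class MajorDefect(MinorDefect):
    """Hypothetical high-priority problem (at or above the minimum, so it raises)."""


class Checker:
    minimum_defect = MajorDefect  # hypothetical threshold

    def raise_if(self, exception, message, *args, **kwargs):
        if issubclass(exception, self.minimum_defect):
            raise exception(*args, **kwargs)
        warn(message, SyntaxWarning, *args, **kwargs)


checker = Checker()
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    checker.raise_if(MinorDefect, "tolerated problem")  # warns only
print(len(caught))  # 1
# checker.raise_if(MajorDefect, "fatal problem")        # would raise MajorDefect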
def k8s_ports_to_metadata_ports(k8s_ports):
    """
    :param k8s_ports: list of V1ServicePort
    :return: list of str, list of exposed ports, example:
        - ['1234/tcp', '8080/udp']
    """
    ports = []
    for k8s_port in k8s_ports:
        if k8s_port.protocol is not None:
            ports.append("%s/%s" % (k8s_port.port, k8s_port.protocol.lower()))
        else:
            ports.append(str(k8s_port.port))
    return ports
:param k8s_ports: list of V1ServicePort
:return: list of str, list of exposed ports, example:
    - ['1234/tcp', '8080/udp']
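A quick usage sketch. To stay self-contained it uses a namedtuple stand-in for `kubernetes.client.V1ServicePort`, modelling only the two attributes the converter reads (`port` and `protocol`); the port numbers are made up.

from collections import namedtuple

FakeServicePort = namedtuple("FakeServicePort", ["port", "protocol"])  # stand-in for V1ServicePort

ports = [
    FakeServicePort(1234, "TCP"),
    FakeServicePort(8080, "UDP"),
    FakeServicePort(53, None),  # no protocol recorded -> plain port number
]
print(k8s_ports_to_metadata_ports(ports))
# ['1234/tcp', '8080/udp', '53']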
def set_widgets(self):
    """Set widgets on the Aggregation Layer Origin Type tab."""
    # First, list available layers in order to check if there are
    # any available layers. Note: this will be repeated in
    # set_widgets_step_fc_agglayer_from_canvas because we need
    # to list them again after coming back from the Keyword Wizard.
    self.parent.step_fc_agglayer_from_canvas.\
        list_compatible_canvas_layers()
    lst_wdg = self.parent.step_fc_agglayer_from_canvas.lstCanvasAggLayers
    if lst_wdg.count():
        self.rbAggLayerFromCanvas.setText(tr(
            'I would like to use an aggregation layer already loaded in '
            'QGIS\n'
            '(launches the %s for aggregation if needed)'
        ) % self.parent.keyword_creation_wizard_name)
        self.rbAggLayerFromCanvas.setEnabled(True)
        self.rbAggLayerFromCanvas.click()
    else:
        self.rbAggLayerFromCanvas.setText(tr(
            'I would like to use an aggregation layer already loaded in '
            'QGIS\n'
            '(no suitable layers found)'))
        self.rbAggLayerFromCanvas.setEnabled(False)
        self.rbAggLayerFromBrowser.click()

    # Set icon
    self.lblIconIFCWAggregationOrigin.setPixmap(QPixmap(None))
Set widgets on the Aggregation Layer Origin Type tab.
def summary(self):
    """
    Gets summary (e.g. accuracy/precision/recall, objective history, total iterations)
    of model trained on the training set. An exception is thrown if
    `trainingSummary is None`.
    """
    if self.hasSummary:
        if self.numClasses <= 2:
            return BinaryLogisticRegressionTrainingSummary(
                super(LogisticRegressionModel, self).summary)
        else:
            return LogisticRegressionTrainingSummary(
                super(LogisticRegressionModel, self).summary)
    else:
        raise RuntimeError("No training summary available for this %s" %
                           self.__class__.__name__)
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model trained on the training set. An exception is thrown if `trainingSummary is None`.
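A hedged usage sketch for the `summary` property: it assumes an active `SparkSession` named `spark`, and the two-row DataFrame is made up purely to produce a trained `LogisticRegressionModel`.

from pyspark.ml.classification import LogisticRegression
from pyspark.ml.linalg import Vectors

# Toy two-row training set, for illustration only.
df = spark.createDataFrame(
    [(1.0, Vectors.dense([1.0])), (0.0, Vectors.dense([-1.0]))],
    ["label", "features"],
)

model = LogisticRegression(maxIter=5).fit(df)
if model.hasSummary:
    # Binary problem, so this is a BinaryLogisticRegressionTrainingSummary.
    print(model.summary.objectiveHistory)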
def set_creator(self, value: Union[Literal, Identifier, str], lang: str=None):
    """ Set the DC Creator literal value

    :param value: Value of the creator node
    :param lang: Language in which the value is
    """
    self.metadata.add(key=DC.creator, value=value, lang=lang)
Set the DC Creator literal value

:param value: Value of the creator node
:param lang: Language in which the value is
def get_clan(self, tag: crtag, timeout: int=None):
    """Get information about a clan

    Parameters
    ----------
    tag: str
        A valid clan tag. Minimum length: 3
        Valid characters: 0289PYLQGRJCUV
    timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    url = self.api.CLAN + '/' + tag
    return self._get_model(url, FullClan, timeout=timeout)
Get information about a clan

Parameters
----------
tag: str
    A valid clan tag. Minimum length: 3
    Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
    Custom timeout that overwrites Client.timeout
def CSWAP(control, target_1, target_2):
    """Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::

        CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
                 [0, 1, 0, 0, 0, 0, 0, 0],
                 [0, 0, 1, 0, 0, 0, 0, 0],
                 [0, 0, 0, 1, 0, 0, 0, 0],
                 [0, 0, 0, 0, 1, 0, 0, 0],
                 [0, 0, 0, 0, 0, 0, 1, 0],
                 [0, 0, 0, 0, 0, 1, 0, 0],
                 [0, 0, 0, 0, 0, 0, 0, 1]]

    :param control: The control qubit.
    :param target_1: The first target qubit.
    :param target_2: The second target qubit. The two target states are swapped if the control
        is in the ``|1>`` state.
    """
    qubits = [unpack_qubit(q) for q in (control, target_1, target_2)]
    return Gate(name="CSWAP", params=[], qubits=qubits)
Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::

    CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0, 0],
             [0, 0, 1, 0, 0, 0, 0, 0],
             [0, 0, 0, 1, 0, 0, 0, 0],
             [0, 0, 0, 0, 1, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 1, 0],
             [0, 0, 0, 0, 0, 1, 0, 0],
             [0, 0, 0, 0, 0, 0, 0, 1]]

:param control: The control qubit.
:param target_1: The first target qubit.
:param target_2: The second target qubit. The two target states are swapped if the control
    is in the ``|1>`` state.
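A short usage sketch, assuming the pyquil 2.x import paths shown below (`pyquil.Program`, `pyquil.gates`); the qubit indices are arbitrary.

from pyquil import Program
from pyquil.gates import X, CSWAP

# Put the control (qubit 0) and the first target (qubit 1) into |1>, then
# conditionally swap the two targets: |1>|1>|0> -> |1>|0>|1>.
prog = Program(
    X(0),            # control in |1>
    X(1),            # first target in |1>
    CSWAP(0, 1, 2),  # swap qubits 1 and 2 because qubit 0 is |1>
)
print(prog)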
def whitelisted(argument=None):
    """Decorates a method requiring that the requesting IP address is
    whitelisted. Requires a whitelist value as a list in the
    Application.settings dictionary. IP addresses can be an individual IP
    address or a subnet.

    Examples:
        ['10.0.0.0/8', '192.168.1.0/24', '1.2.3.4/32']

    :param list argument: List of whitelisted ip addresses or blocks
    :raises: web.HTTPError
    :raises: ValueError
    :rtype: any
    """
    def is_whitelisted(remote_ip, whitelist):
        """Check to see if an IP address is whitelisted.

        :param str remote_ip: The IP address to check
        :param list whitelist: The whitelist to check against
        :rtype: bool
        """
        # Convert the ip into a long int version of the ip address
        user_ip = ipaddr.IPv4Address(remote_ip)

        # Loop through the ranges in the whitelist and check
        if any([user_ip in ipaddr.IPv4Network(entry) for entry in whitelist]):
            return True

        return False

    # If the argument is a function then there were no parameters
    if type(argument) is types.FunctionType:

        def wrapper(self, *args, **kwargs):
            """Check the whitelist against our application.settings dictionary
            whitelist key.

            :rtype: any
            :raises: web.HTTPError
            """
            # Validate we have a configured whitelist
            if 'whitelist' not in self.application.settings:
                raise ValueError('whitelist not found in Application.settings')

            # If the IP address is whitelisted, call the wrapped function
            if is_whitelisted(self.request.remote_ip,
                              self.application.settings['whitelist']):
                # Call the original function, IP is whitelisted
                return argument(self, *args, **kwargs)

            # The ip address was not in the whitelist
            raise web.HTTPError(403)

        # Return the wrapper method
        return wrapper

    # They passed in string or list?
    else:

        # Convert a single ip address to a list
        if isinstance(argument, str):
            argument = [argument]

        # Make sure it's a list
        elif not isinstance(argument, list):
            raise ValueError('whitelisted requires no parameters or '
                             'a string or list')

        def argument_wrapper(method):
            """Wrapper for a method passing in the IP addresses that
            constitute the whitelist.

            :param method method: The method being wrapped
            :rtype: any
            :raises: web.HTTPError
            """
            def validate(self, *args, **kwargs):
                """Validate the ip address against the list of ip addresses
                passed in as a list.
                """
                if is_whitelisted(self.request.remote_ip, argument):
                    # Call the original function, IP is whitelisted
                    return method(self, *args, **kwargs)

                # The ip address was not in the whitelist
                raise web.HTTPError(403)

            # Return the validate method
            return validate

        # Return the wrapper method
        return argument_wrapper
Decorates a method requiring that the requesting IP address is whitelisted. Requires a whitelist
value as a list in the Application.settings dictionary. IP addresses can be an individual IP
address or a subnet.

Examples:
    ['10.0.0.0/8', '192.168.1.0/24', '1.2.3.4/32']

:param list argument: List of whitelisted ip addresses or blocks
:raises: web.HTTPError
:raises: ValueError
:rtype: any
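A hedged usage sketch with Tornado. The handlers, routes and network ranges are made up; it relies on the standard Tornado behaviour that extra keyword arguments to `web.Application` are stored in `Application.settings`.

from tornado import ioloop, web


class StatusHandler(web.RequestHandler):

    @whitelisted  # uses Application.settings['whitelist']
    def get(self):
        self.write({'status': 'ok'})


class AdminHandler(web.RequestHandler):

    @whitelisted(['10.0.0.0/8', '127.0.0.1/32'])  # explicit whitelist
    def get(self):
        self.write({'admin': True})


if __name__ == '__main__':
    app = web.Application(
        [(r'/status', StatusHandler), (r'/admin', AdminHandler)],
        whitelist=['192.168.1.0/24', '127.0.0.1/32'],
    )
    app.listen(8888)
    ioloop.IOLoop.current().start()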
def insertDataset(self, businput):
    """
    input dictionary must have the following keys:
    dataset, primary_ds_name(name), processed_ds(name), data_tier(name),
    acquisition_era(name), processing_version
    It may have the following keys:
    physics_group(name), xtcrosssection, creation_date, create_by,
    last_modification_date, last_modified_by
    """
    if not ("primary_ds_name" in businput and "dataset" in businput
            and "dataset_access_type" in businput and "processed_ds_name" in businput):
        dbsExceptionHandler('dbsException-invalid-input',
                            "business/DBSDataset/insertDataset must have dataset, dataset_access_type, primary_ds_name, processed_ds_name as input")
    if "data_tier_name" not in businput:
        dbsExceptionHandler('dbsException-invalid-input',
                            "insertDataset must have data_tier_name as input.")

    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        dsdaoinput = {}
        dsdaoinput["primary_ds_name"] = businput["primary_ds_name"]
        dsdaoinput["data_tier_name"] = businput["data_tier_name"].upper()
        dsdaoinput["dataset_access_type"] = businput["dataset_access_type"].upper()
        # not required to pre-exist in the db. will insert with the dataset if not in yet
        # processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version Changed as 4/30/2012 YG.
        # although acquisition era and processing version are not required for a dataset in the schema
        # (the schema is built this way because we need to accommodate the DBS2 data), we impose the
        # requirement on the API. So both acquisition and processing eras are required
        # YG 12/07/2011 TK-362
        if "acquisition_era_name" in businput and "processing_version" in businput:
            erals = businput["processed_ds_name"].rsplit('-')
            if erals[0] == businput["acquisition_era_name"] and \
                    erals[len(erals)-1] == "%s%s" % ("v", businput["processing_version"]):
                dsdaoinput["processed_ds_name"] = businput["processed_ds_name"]
            else:
                dbsExceptionHandler('dbsException-invalid-input',
                                    "insertDataset: processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisfied.")
        else:
            dbsExceptionHandler("dbsException-missing-data",
                                "insertDataset: Required acquisition_era_name or processing_version is not found in the input")

        if "physics_group_name" in businput:
            dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
            if dsdaoinput["physics_group_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data",
                                    "insertDataset. physics_group_name not found in DB")
        else:
            dsdaoinput["physics_group_id"] = None

        dsdaoinput["dataset_id"] = self.sm.increment(conn, "SEQ_DS")
        # we are better off separating out what we need for the dataset DAO
        dsdaoinput.update({
            "dataset": "/%s/%s/%s" % (businput["primary_ds_name"],
                                      businput["processed_ds_name"],
                                      businput["data_tier_name"].upper()),
            "prep_id": businput.get("prep_id", None),
            "xtcrosssection": businput.get("xtcrosssection", None),
            "creation_date": businput.get("creation_date", dbsUtils().getTime()),
            "create_by": businput.get("create_by", dbsUtils().getCreateBy()),
            "last_modification_date": businput.get("last_modification_date", dbsUtils().getTime()),
            #"last_modified_by" : businput.get("last_modified_by", dbsUtils().getModifiedBy())
            "last_modified_by": dbsUtils().getModifiedBy()
        })
        """
        repeated again, why? comment out by YG 3/14/2012
        #physics group
        if "physics_group_name" in businput:
            dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
            if dsdaoinput["physics_group_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data",
                                    "insertDataset. Physics Group : %s Not found" % businput["physics_group_name"])
        else:
            dsdaoinput["physics_group_id"] = None
        """
        # See if Processing Era exists
        if "processing_version" in businput and businput["processing_version"] != 0:
            dsdaoinput["processing_era_id"] = self.proceraid.execute(conn, businput["processing_version"])
            if dsdaoinput["processing_era_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data",
                                    "DBSDataset/insertDataset: processing_version not found in DB")
        else:
            dbsExceptionHandler("dbsException-invalid-input",
                                "DBSDataset/insertDataset: processing_version is required")

        # See if Acquisition Era exists
        if "acquisition_era_name" in businput:
            dsdaoinput["acquisition_era_id"] = self.acqeraid.execute(conn, businput["acquisition_era_name"])
            if dsdaoinput["acquisition_era_id"] == -1:
                dbsExceptionHandler("dbsException-missing-data",
                                    "DBSDataset/insertDataset: acquisition_era_name not found in DB")
        else:
            dbsExceptionHandler("dbsException-invalid-input",
                                "DBSDataset/insertDataset: acquisition_era_name is required")

        try:
            # insert the dataset
            self.datasetin.execute(conn, dsdaoinput, tran)
        except SQLAlchemyIntegrityError as ex:
            if (str(ex).lower().find("unique constraint") != -1 or
                    str(ex).lower().find("duplicate") != -1):
                # dataset already exists, lets fetch the ID
                self.logger.warning("Unique constraint violation being ignored...")
                self.logger.warning("%s" % ex)
                ds = "/%s/%s/%s" % (businput["primary_ds_name"],
                                    businput["processed_ds_name"],
                                    businput["data_tier_name"].upper())
                dsdaoinput["dataset_id"] = self.datasetid.execute(conn, ds)
                if dsdaoinput["dataset_id"] == -1:
                    dbsExceptionHandler("dbsException-missing-data",
                                        "DBSDataset/insertDataset. Strange error, the dataset %s does not exist ?" % ds)
            if (str(ex).find("ORA-01400")) != -1:
                dbsExceptionHandler("dbsException-missing-data",
                                    "insertDataset must have: dataset, primary_ds_name, processed_ds_name, data_tier_name ")
        except Exception as e:
            raise

        #FIXME : What about the READ-only status of the dataset
        #There is no READ-only status for a dataset.

        # Create dataset_output_mod_mod_configs mapping
        if "output_configs" in businput:
            for anOutConfig in businput["output_configs"]:
                dsoutconfdaoin = {}
                dsoutconfdaoin["dataset_id"] = dsdaoinput["dataset_id"]
                dsoutconfdaoin["output_mod_config_id"] = self.outconfigid.execute(
                    conn,
                    anOutConfig["app_name"],
                    anOutConfig["release_version"],
                    anOutConfig["pset_hash"],
                    anOutConfig["output_module_label"],
                    anOutConfig["global_tag"])
                if dsoutconfdaoin["output_mod_config_id"] == -1:
                    dbsExceptionHandler(
                        "dbsException-missing-data",
                        "DBSDataset/insertDataset: Output config (%s, %s, %s, %s, %s) not found"
                        % (anOutConfig["app_name"],
                           anOutConfig["release_version"],
                           anOutConfig["pset_hash"],
                           anOutConfig["output_module_label"],
                           anOutConfig["global_tag"]))
                try:
                    self.datasetoutmodconfigin.execute(conn, dsoutconfdaoin, tran)
                except Exception as ex:
                    if str(ex).lower().find("unique constraint") != -1 or \
                            str(ex).lower().find("duplicate") != -1:
                        pass
                    else:
                        raise

        # Dataset parentage will NOT be added by this API; it will be set by insertFiles()--deduced by insertFiles
        # Dataset runs will NOT be added by this API; they will be set by insertFiles()--deduced by insertFiles OR insertRun API call
        tran.commit()
        tran = None
    except Exception:
        if tran:
            tran.rollback()
            tran = None
        raise
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
input dictionary must have the following keys:
dataset, primary_ds_name(name), processed_ds(name), data_tier(name),
acquisition_era(name), processing_version

It may have the following keys:
physics_group(name), xtcrosssection, creation_date, create_by,
last_modification_date, last_modified_by
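For orientation, here is a hedged sketch of a `businput` dictionary that would pass the validation above. Every value is made up; the only structural constraints taken from the code are the required keys and the rule that `processed_ds_name` must start with the acquisition era name and end with "v" plus the processing version.

businput = {
    # Required keys (checked explicitly by insertDataset).
    "primary_ds_name": "SingleMu",
    "processed_ds_name": "Era2012A-FilterX-v1",  # <era>[-filter][-str]-v<version>
    "data_tier_name": "AOD",
    "dataset_access_type": "VALID",
    "dataset": "/SingleMu/Era2012A-FilterX-v1/AOD",
    "acquisition_era_name": "Era2012A",
    "processing_version": 1,
    # Optional keys.
    "physics_group_name": "Muon",
    "xtcrosssection": 0.0,
    "output_configs": [{
        "app_name": "cmsRun",
        "release_version": "CMSSW_X_Y_Z",
        "pset_hash": "deadbeef",
        "output_module_label": "out",
        "global_tag": "GT::All",
    }],
}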
def _get_model(vehicle):
    """Clean the model field. Best guess."""
    model = vehicle['model']
    model = model.replace(vehicle['year'], '')
    model = model.replace(vehicle['make'], '')
    return model.strip().split(' ')[0]
Clean the model field. Best guess.
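A quick worked example of the clean-up, using a made-up vehicle record:

vehicle = {"year": "2014", "make": "Ford", "model": "2014 Ford Focus SE"}
# "2014 Ford Focus SE" -> " Ford Focus SE" -> "  Focus SE" -> "Focus SE" -> "Focus"
print(_get_model(vehicle))  # Focus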
def mkdir(self, path, parents=True, mode=0o755, raise_if_exists=False):
    """
    Has no return value (just like WebHDFS)
    """
    if not parents or raise_if_exists:
        warnings.warn('webhdfs mkdir: parents/raise_if_exists not implemented')
    permission = int(oct(mode)[2:])  # Convert from int(decimal) to int(octal)
    self.client.makedirs(path, permission=permission)
Has no return value (just like WebHDFS)
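The only subtle line is the permission handling: the octal `mode` is turned into a plain integer whose decimal digits spell the octal permission, which is presumably the form the underlying WebHDFS client expects. A tiny sketch, assuming Python 3 `oct()` semantics:

mode = 0o755
print(oct(mode))        # '0o755'
print(oct(mode)[2:])    # '755'
permission = int(oct(mode)[2:])
print(permission)       # 755 -- a decimal int carrying the octal digits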
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'):
    """
    Current time

    Args:
        typ: one of ['date', 'time', 'time_path', 'raw', '']
        tz: timezone
        trading: check if current date is trading day
        cal: trading calendar

    Returns:
        relevant current time or date

    Examples:
        >>> cur_dt = pd.Timestamp('now')
        >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
        True
        >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
        True
        >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
        True
        >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
        True
        >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
        True
        >>> cur_time(typ='', trading=False) == cur_dt.date()
        True
    """
    dt = pd.Timestamp('now', tz=tz)

    if typ == 'date':
        if trading:
            return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d')
        else:
            return dt.strftime('%Y-%m-%d')

    if typ == 'time':
        return dt.strftime('%Y-%m-%d %H:%M:%S')
    if typ == 'time_path':
        return dt.strftime('%Y-%m-%d/%H-%M-%S')
    if typ == 'raw':
        return dt

    return trade_day(dt).date() if trading else dt.date()
Current time

Args:
    typ: one of ['date', 'time', 'time_path', 'raw', '']
    tz: timezone
    trading: check if current date is trading day
    cal: trading calendar

Returns:
    relevant current time or date

Examples:
    >>> cur_dt = pd.Timestamp('now')
    >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d')
    True
    >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S')
    True
    >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S')
    True
    >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp)
    True
    >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp)
    True
    >>> cur_time(typ='', trading=False) == cur_dt.date()
    True
def _worker_queue_scheduled_tasks(self):
    """
    Helper method that takes due tasks from the SCHEDULED queue and puts
    them in the QUEUED queue for execution. This should be called
    periodically.
    """
    queues = set(self._filter_queues(self.connection.smembers(
        self._key(SCHEDULED))))

    now = time.time()
    for queue in queues:
        # Move due items from the SCHEDULED queue to the QUEUED queue. If
        # items were moved, remove the queue from the scheduled set if it
        # is empty, and add it to the queued set so the task gets picked
        # up. If any unique tasks are already queued, don't update their
        # queue time (because the new queue time would be later).
        result = self.scripts.zpoppush(
            self._key(SCHEDULED, queue),
            self._key(QUEUED, queue),
            self.config['SCHEDULED_TASK_BATCH_SIZE'],
            now,
            now,
            if_exists=('noupdate',),
            on_success=('update_sets', queue,
                        self._key(SCHEDULED), self._key(QUEUED)),
        )
        self.log.debug('scheduled tasks', queue=queue, qty=len(result))
        # XXX: ideally this would be in the same pipeline, but we only want
        # to announce if there was a result.
        if result:
            self.connection.publish(self._key('activity'), queue)
            self._did_work = True
Helper method that takes due tasks from the SCHEDULED queue and puts them in the QUEUED queue for execution. This should be called periodically.
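The worker performs this move atomically with a Lua script (`zpoppush`); the following is only a simplified, non-atomic illustration of the same idea using plain redis-py calls, with made-up key names, to show what "move due items from SCHEDULED to QUEUED" means.

import time
import redis

r = redis.Redis()

def move_due_tasks(queue, batch_size=1000):
    """Move task ids whose due time has passed from a 'scheduled' ZSET into a
    'queued' ZSET for the same queue (non-atomic demo only)."""
    now = time.time()
    scheduled_key = 'demo:scheduled:%s' % queue  # hypothetical key layout
    queued_key = 'demo:queued:%s' % queue

    due = r.zrangebyscore(scheduled_key, '-inf', now, start=0, num=batch_size)
    for task_id in due:
        pipe = r.pipeline()
        pipe.zrem(scheduled_key, task_id)
        pipe.zadd(queued_key, {task_id: now})
        pipe.execute()
    if due:
        r.publish('demo:activity', queue)  # announce new work, as the worker does
    return due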
def insertTopLevelItem(self, index, item):
    """
    Inserts the provided item at the given index in the tree.

    :param      index | <int>
                item  | <XGanttWidgetItem>
    """
    self.treeWidget().insertTopLevelItem(index, item)

    if self.updatesEnabled():
        try:
            item.sync(recursive=True)
        except AttributeError:
            pass
Inserts the provided item at the given index in the tree.

:param      index | <int>
            item  | <XGanttWidgetItem>
def make_symbols(symbols, *args):
    """Return a list of uppercase strings like "GOOG", "$SPX", "XOM"...

    Arguments:
      symbols (str or list of str): list of market ticker symbols to normalize
        If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols

    Returns:
      list of str: list of canonical ticker symbol strings (typically after .upper().strip())

    See Also:
      pug.dj.db.normalize_names

    Examples:
      >>> make_symbols("Goog")
      ['GOOG']
      >>> make_symbols("  $SPX   ", " aaPL ")
      ['$SPX', 'AAPL']
      >>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
      ['GOOG', 'AAPL', '$SPX']
      >>> make_symbols(" $Spy, Goog, aAPL ")
      ['$SPY', 'GOOG', 'AAPL']
    """
    if (hasattr(symbols, '__iter__') and not any(symbols)) \
            or (isinstance(symbols, (list, tuple, Mapping)) and not symbols):
        return []
    if isinstance(symbols, basestring):
        # # FIXME: find a direct API for listing all possible symbols
        # try:
        #     return list(set(dataobj.get_symbols_from_list(symbols)))
        # except:
        return [s.upper().strip() for s in
                (symbols.split(',') + list(str(a) for a in args))]
    else:
        ans = []
        for sym in (list(symbols) + list(args)):
            tmp = make_symbols(sym)
            ans = ans + tmp
        return list(set(ans))
Return a list of uppercase strings like "GOOG", "$SPX", "XOM"...

Arguments:
  symbols (str or list of str): list of market ticker symbols to normalize
    If `symbols` is a str a get_symbols_from_list() call is used to retrieve the list of symbols

Returns:
  list of str: list of canonical ticker symbol strings (typically after .upper().strip())

See Also:
  pug.dj.db.normalize_names

Examples:
  >>> make_symbols("Goog")
  ['GOOG']
  >>> make_symbols("  $SPX   ", " aaPL ")
  ['$SPX', 'AAPL']
  >>> make_symbols(["$SPX", ["GOOG", "AAPL"]])
  ['GOOG', 'AAPL', '$SPX']
  >>> make_symbols(" $Spy, Goog, aAPL ")
  ['$SPY', 'GOOG', 'AAPL']
def win_menu_select_item(title, *items, **kwargs):
    """
    Usage:
        win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)")

    :param title:
    :param text:
    :param items:
    :return:
    """
    text = kwargs.get("text", "")
    if not (0 < len(items) < 8):
        raise ValueError("expected between 1 and 7 menu items")

    f_items = [LPCWSTR(item) for item in items]
    for i in xrange(8 - len(f_items)):
        f_items.append(LPCWSTR(""))
    ret = AUTO_IT.AU3_WinMenuSelectItem(LPCWSTR(title), LPCWSTR(text),
                                        *f_items)
    return ret
Usage:
    win_menu_select_item("[CLASS:Notepad]", "", u"文件(&F)", u"退出(&X)")

:param title:
:param text:
:param items:
:return:
def handle_no_start_state(self):
    """Handles the situation when no start state exists during execution

    The method waits until a transition is created. It then checks again for
    an existing start state and waits again if this is not the case. It
    returns the None state if the state machine was stopped.
    """
    start_state = self.get_start_state(set_final_outcome=True)
    while not start_state:
        # depending on the execution mode pause execution
        execution_signal = state_machine_execution_engine.handle_execution_mode(self)
        if execution_signal is StateMachineExecutionStatus.STOPPED:
            # this will be caught at the end of the run method
            return None

        self._transitions_cv.acquire()
        self._transitions_cv.wait(3.0)
        self._transitions_cv.release()

        start_state = self.get_start_state(set_final_outcome=True)
    return start_state
Handles the situation when no start state exists during execution

The method waits until a transition is created. It then checks again for an existing start state
and waits again if this is not the case. It returns the None state if the state machine was
stopped.